| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
a = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
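# Each optional backend below is probed at import time: when a dependency is
# missing, the matching `dummy_*_objects` module is star-imported instead, so
# a helpful error is raised lazily on first use of the unavailable class.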
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
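# The tester below builds a deliberately small ConvNextV2 configuration
# (32x32 images, four tiny stages) so that the model, classification-head,
# and backbone checks all run quickly on CPU.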
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: (batch_size, channels, height // 32, width // 32)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import math
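# The two functions below appear to implement Project Euler problem 234
# ("Semidivisible numbers"): `prime_sieve` is a plain sieve of Eratosthenes,
# e.g. prime_sieve(10) -> [2, 3, 5, 7], and `solution` sums every number up
# to the limit that is divisible by exactly one of lps(n) and ups(n), the
# prime squares bracketing n.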
def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return a list of the primes below ``n``."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers no larger than ``limit``."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
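# This tokenizer splits Japanese text with a custom sub-word tokenizer
# (`SubWordJapaneseTokenizer`, defined below), driven by two files: a
# comma-separated `vocab.txt` and an `emoji.json` mapping. Optional cleaning
# masks URLs, e-mails, phone numbers, dates and prices with special tokens.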
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for GPT-NeoX-Japanese, based on the sub-word tokenization defined
    in `SubWordJapaneseTokenizer` below.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case__ )
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
return text
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
return "".join(sorted(snake_case__ ) )
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
return word_by_signature[signature(snake_case__ )]
A_ = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
A_ = sorted({word.strip().lower() for word in data.splitlines()})
A_ = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
A_ = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
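# Flax/linen counterparts of the 2D UNet building blocks used by diffusers:
# cross-attention and plain down-blocks, the matching up-blocks, and the
# cross-attention mid-block. Each block stacks resnets (and optionally
# transformers) in `setup` and applies them sequentially in `__call__`.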
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
"""simple docstring"""
from math import factorial, pi
def __UpperCamelCase ( snake_case__ , snake_case__ = 30 ):
if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
A_ : List[Any] = float(__SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( snake_case__ , snake_case__ = 30 ):
if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
A_ : str = float(__SCREAMING_SNAKE_CASE )
A_ : str = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
"""Convert BiT checkpoints from the timm library."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
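# Conversion flow: build a `BitConfig` from the ImageNet-1k label file, rename
# the timm state-dict keys into the Hugging Face layout, then verify that both
# processor inputs and model logits match the original timm model before
# saving (and optionally pushing) the converted checkpoint.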
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
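# `LEDTokenizerFast` reuses the byte-level BPE machinery of the GPT-2 fast
# tokenizer; the LED-specific part is `_pad`, which extends
# `global_attention_mask` with -1 so it stays aligned with padded inputs.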
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
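# The DPR tokenizers below are thin subclasses of `BertTokenizer` that only
# swap in the DPR checkpoint locations and default configurations.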
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-ctx_encoder-single-nq-base': 512,
    'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-question_encoder-single-nq-base': 512,
    'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/dpr-reader-single-nq-base': 512,
    'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
"""simple docstring"""
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"""There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.""" )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4, ):
        """simple docstring"""
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans, ):
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 236 | 1 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
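    # Added usage example (assumes the fixed `binary_and` above): both inputs are
    # zero-padded to equal width before the bitwise comparison.
    print(binary_and(25, 32))  # 0b000000
    print(binary_and(37, 50))  # 0b100000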
| 653 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """simple docstring"""
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """simple docstring"""
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
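# Added note (hedged): the try/except blocks above are diffusers' soft-dependency pattern;
# when an optional backend is missing, dummy placeholder objects are imported instead so
# attribute access fails late with a helpful message. A minimal sketch (names hypothetical):
#
#   try:
#       if not is_torch_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       from ...utils.dummy_torch_objects import *  # noqa: F403  (raises on use)
#   else:
#       from .pipeline_real import RealPipeline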
| 61 | 0 |
def encrypt(input_string: str, key: int) -> str:
    """simple docstring"""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """simple docstring"""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("""*""")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """simple docstring"""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
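    # Added round-trip example for the zigzag (rail-fence) cipher above.
    ciphertext = encrypt("Hello, World!", 4)  # -> "H !e,Wdloollr"
    assert decrypt(ciphertext, 4) == "Hello, World!"
    assert bruteforce(ciphertext)[4] == "Hello, World!"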
| 379 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 379 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self ):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id="""hf-internal-testing/fixtures_ade20k""" ,repo_type="""dataset""" ,filename="""ADE_val_00000001.jpg""" )
    image = Image.open(filepath ).convert("""RGB""" )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    def test_inference_swin_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors="""pt""" ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
    def test_inference_convnext_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors="""pt""" ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
| 398 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand(BaseTransformersCLICommand):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser ):
        '''simple docstring'''
        download_parser = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=UpperCAmelCase , default=UpperCAmelCase , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=UpperCAmelCase , help="""Name of the model to download""" )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model , cache , force , trust_remote_code ):
        '''simple docstring'''
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
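# Added note (hedged): wired into the transformers CLI entry point, this is invoked as e.g.
#   transformers-cli download --cache-dir ./models bert-base-uncased
# which pre-downloads both the model weights and the tokenizer into the given cache.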
| 398 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """simple docstring"""
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))
    def get_scheduler_config( self ,**kwargs ):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self ,**kwargs ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ,eta ).prev_sample
        return sample
    def test_timesteps( self ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start ,beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,)
    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
            self.check_over_forward(time_step=t ,num_inference_steps=num_inference_steps )
    def test_eta( self ):
        for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t ,eta=eta )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] ,dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 ,per_sample_batch )
        residual = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.223967 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type='v_prediction' )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True ,beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False ,beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 300 |
from math import factorial, radians
def sin(angle_in_degrees: float , accuracy: int = 18 , rounded_values_count: int = 10 ) -> float:
    '''simple docstring'''
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
    __import__('doctest').testmod()
| 300 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual( self, list1, list2, tol ):
        """simple docstring"""
        self.assertEqual(len(list1 ), len(list2 ) )
        for a, b in zip(list1, list2 ):
            self.assertAlmostEqual(a, b, delta=tol )
    def test_gradient_accumulator( self ):
        """simple docstring"""
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step, 3 )
        self.assertEqual(len(accumulator.gradients ), 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1E-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step, 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1E-2 )
    def test_gradient_accumulator_distribution_strategy( self ):
        """simple docstring"""
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('''CPU''' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='''CPU''' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer, _ = create_optimizer(5E-5, 10, 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) )
        @tf.function
        def accumulate(grad1, grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1, grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1E-2 )
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1E-2 )
accumulate([1.0, 2.0], [-1.0, 1.0] )
accumulate([3.0, -1.0], [-1.0, -1.0] )
accumulate([-2.0, 2.0], [3.0, -2.0] )
self.assertEqual(accumulator.step, 3 )
_check_local_values([2.0, 3.0], [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step, 0 )
_check_local_values([0.0, 0.0], [0.0, 0.0] )
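# Added sketch (hedged, comment only): the pattern the tests above exercise — accumulate
# per-batch gradients, then apply them once every `accumulation_steps` batches:
#
#   accumulator = GradientAccumulator()
#   for step, batch in enumerate(dataset):
#       with tf.GradientTape() as tape:
#           loss = compute_loss(model, batch)          # hypothetical loss helper
#       accumulator(tape.gradient(loss, model.trainable_variables))
#       if (step + 1) % accumulation_steps == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()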
| 431 |
import re
def is_sri_lankan_phone_number(phone: str ) -> bool:
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = '''0094702343221'''
    print(is_sri_lankan_phone_number(phone))
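    # Added checks: the prefix may be 0 / 94 / +94 / 0094; the mobile code must start with 7,
    # its second digit must be in {0,1,2,4,5,6,7,8}, then an optional '-' or ' ' and seven digits.
    assert is_sri_lankan_phone_number('+94773283048')
    assert not is_sri_lankan_phone_number('0912343221')  # mobile code starts with 9, not 7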
| 431 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ : Any =logging.get_logger(__name__)
def squared_euclidean_distance(a , b ) -> np.ndarray:
    """simple docstring"""
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x , clusters ) -> np.ndarray:
    """simple docstring"""
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
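# Added worked example (comment only, to keep this library module import side-effect free):
# squared_euclidean_distance expands ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 row-wise, so
# d[i, j] is the squared distance between pixel i and cluster j, e.g.
#
#   a = np.array([[0.0, 0.0, 0.0]])
#   b = np.array([[1.0, 2.0, 2.0]])
#   squared_euclidean_distance(a, b)  # -> [[9.0]]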
class ImageGPTImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , clusters: Optional[Union[List[List[int]], np.ndarray]] = None , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_normalize: bool = True , do_color_quantize: bool = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
        return resize(
            image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , data_format: Optional[Union[str, ChannelDimension]] = None , ):
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_normalize: Optional[bool] = None , do_color_quantize: Optional[bool] = None , clusters: Optional[Union[List[List[int]], np.ndarray]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
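# Added usage sketch (hedged; assumes the fixed names above and a checkpoint that ships
# color clusters, e.g. openai/imagegpt-small):
#
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#   encoding = processor(images=image, return_tensors="pt")
#   encoding["input_ids"].shape  # (batch_size, height * width) after color quantization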
| 434 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp( self ):
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['input_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @cached_property
    def default_feature_extractor( self ):
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
            if is_torchaudio_available()
            else None
        )
@slow
    def test_inference_audio_classification( self ):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(torch_device )
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 434 | 1 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, pytorch_dump_folder_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """

    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
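# Example invocation (illustrative; the script filename is an assumption):
#     python convert_bertabs_original_pytorch_checkpoint.py \
#         --bertabs_checkpoint_path ./bertabs_cnndm.pt \
#         --pytorch_dump_folder_path ./bertabs-converted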
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 |
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from a keyword, keeping spaces and letter order."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
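# e.g. remove_duplicates("Hello World!!") returns "Helo Wrd"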
def create_cipher_map(key: str) -> dict[str, str]:
    """Return a dictionary mapping each plaintext letter to its cipher letter."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher a message with a cipher map; characters without a mapping pass through unchanged."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher a message by inverting the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 163 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    scheduler: VQDiffusionScheduler
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings

    def __init__(
        self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel, scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0,
        truncation_rate: float = 1.0, num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil",
        return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
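    # Minimal usage sketch (illustrative; assumes the pretrained "microsoft/vq-diffusion-ithq" weights):
    #     pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    #     image = pipeline("teddy bear playing in the pool").images[0]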
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` such that, for each column vector, only the most likely classes whose cumulative
        probability stays below `truncation_rate` are kept; the remaining probabilities are zeroed out in log
        space (set to log(0) = -inf).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
return rv | 59 |
from math import pow, sqrt
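# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
# Each helper below solves that relation for one unknown; all inputs are validated first.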
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
) | 59 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 572 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
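    # e.g. "xclip-base-patch32" -> patch size 32, "xclip-large-patch14" -> patch size 14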
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)
# Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)

    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 572 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DETA model."""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6,
        encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024,
        decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu",
        d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine",
        num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True,
        two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5,
        giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5,
        giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
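# Minimal usage sketch (illustrative):
#     configuration = DetaConfig()
#     configuration.num_attention_heads  # 8, aliased to encoder_attention_heads via attribute_map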
| 713 | '''simple docstring'''
from __future__ import annotations
lowerCAmelCase_ : Optional[Any] = """Muhammad Umer Farooq"""
lowerCAmelCase_ : str = """MIT"""
lowerCAmelCase_ : Optional[Any] = """1.0.0"""
lowerCAmelCase_ : Union[str, Any] = """Muhammad Umer Farooq"""
lowerCAmelCase_ : Any = """[email protected]"""
lowerCAmelCase_ : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect absolute urls from every anchor tag on the page."""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                    url = parse.urljoin(self.domain, value)
                    self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the second-level and top-level domain of a url."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the network location part of a url."""
    return parse.urlparse(url).netloc
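# e.g. get_domain_name("https://a.b.c.d/e/f?g=h,i=j#k") returns "c.d"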
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Return a sorted list of e-mail addresses found on the given page and its links."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails)))
| 204 | 0 |
"""simple docstring"""
a : List[Any] = 8.3_14_45_98
def lowercase__(A , A ) ->float:
"""simple docstring"""
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
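# Worked check (illustrative): for N2 at 300 K with M = 0.028 kg/mol,
# v_rms = sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s. Note that the demo below passes
# molar_mass = 28: the formula expects kg/mol for a result in m/s.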
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 218 |
"""simple docstring"""
def is_even(number: int) -> bool:
    """Return True if the number is even: an even number's least significant bit is always 0."""
    return number & 1 == 0
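# e.g. is_even(4) -> True, is_even(7) -> False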
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass
def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
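    # e.g. NUM_SHARDS=4 and NUM_ITEMS_PER_SHARD=3 give 12 examples; with world_size=8,
    # ranks 0-3 receive 2 examples each and ranks 4-7 receive 1 (the remainder rule below).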
expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 715 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Optional[int] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
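            # e.g. for hidden size 768 the fused qkv_proj weight has 3 * 768 = 2304 rows, split
            # into three 768-row chunks below (stored by metaseq in K, V, Q order).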
            # `SequenceParallelTransformerBlock` keeps the QKV weight separated into K, V, Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the metaseq weights into our OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 315 | 0 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12,
        n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02,
        scale_attn_weights: bool = True, use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer,
            n_head=n_head, n_inner=n_inner, activation_function=activation_function,
            resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights, use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate captions for the given CLIP features; returns token ids and sequence lengths."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self, input_ids=None, input_embeds=None, device=None, beam_size: int = 5,
        entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 26 |
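For readers following the beam bookkeeping above, here is a minimal, self-contained sketch of the same score and length accounting on dummy logits. The toy beam size, vocabulary size, and random inputs are illustrative assumptions, not part of the original pipeline.

# Hedged sketch of length-normalized beam selection, assuming only torch.
import torch

beam_size, vocab = 3, 5
scores = torch.randn(beam_size)                       # running log-prob per beam
logits = torch.log_softmax(torch.randn(beam_size, vocab), dim=-1)
seq_lengths = torch.ones(beam_size)

scores_sum = scores[:, None] + logits                 # [beam, vocab] candidate scores
scores_avg = scores_sum / (seq_lengths[:, None] + 1)  # length-normalized, as above
top_scores, flat_idx = scores_avg.view(-1).topk(beam_size)
beam_source = flat_idx // vocab                       # which beam each winner extends
next_tokens = flat_idx % vocab                        # which token it appends
print(beam_source.tolist(), next_tokens.tolist())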
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments pertaining to the self-training procedure."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training loop: fine-tune, pseudo-label, filter, and repeat."""
    accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            ) | 39 | 0 |
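The pseudo-label filtering step above is easy to exercise in isolation. The snippet below is a hedged sketch on a toy in-memory dataset; the column names mirror the script, while the threshold and rows are made up for illustration.

# Minimal sketch of confidence-based pseudo-label filtering with `datasets`.
from datasets import Dataset

dataset = Dataset.from_dict({"prediction": ["pos", "neg"], "probability": [0.95, 0.40]})
confident = dataset.filter(lambda example: example["probability"] > 0.9)
confident = confident.rename_column("prediction", "label")
print(confident["label"])  # -> ['pos']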
def is_even(number: int) -> bool:
    """Return True if the given integer is even."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
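A quick usage check for the bitwise parity test above; the values are arbitrary examples.

assert is_even(4) is True
assert is_even(7) is False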
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Stash any existing user config so the tests run against a clean slate.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the stashed user config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 620 | 0 |
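The tests above drive the CLI through accelerate's own test helpers. The standalone sketch below shows the same pattern with plain `subprocess`, which is an assumption for illustration; it requires accelerate to be installed on PATH.

# Hedged sketch: run an accelerate subcommand and inspect its output.
import subprocess

result = subprocess.run(["accelerate", "env"], capture_output=True, text=True)
print(result.returncode, result.stdout[:200])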
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into a lower-triangular L and upper-triangular U."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 |
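A quick sanity check for the decomposition above: multiplying the two factors should reconstruct the input. The 2x2 matrix is an arbitrary example.

import numpy as np

a = np.array([[2.0, 1.0], [4.0, 3.0]])
lower, upper = lower_upper_decomposition(a)
assert np.allclose(lower @ upper, a)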
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 299 | 0 |
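Once the passages and FAISS index are saved as above, they can be queried with a DPR question encoder. This is a hedged sketch: the question-encoder checkpoint and the relative paths mirror the script's defaults but are assumptions here.

# Sketch of querying the saved knowledge dataset with a DPR question encoder.
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

dataset = load_from_disk("test_run/dummy-kb/my_knowledge_dataset")
dataset.load_faiss_index("embeddings", "test_run/dummy-kb/my_knowledge_dataset_hnsw_index.faiss")

q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
question = "What does Moses' rod turn into ?"
q_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].detach().numpy()
scores, passages = dataset.get_nearest_examples("embeddings", q_emb, k=2)
print(passages["title"])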
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_convnext"""] = ["""ConvNextFeatureExtractor"""]
    _import_structure["""image_processing_convnext"""] = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_convnext"""] = [
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_convnext"""] = [
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)

| 715 |

from __future__ import annotations
__a : str = """Muhammad Umer Farooq"""
__a : Optional[Any] = """MIT"""
__a : int = """1.0.0"""
__a : Optional[int] = """Muhammad Umer Farooq"""
__a : Dict = """[email protected]"""
__a : Optional[Any] = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl a page and collect email addresses found on its linked pages."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails))) | 522 | 0 |
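The URL helpers above are easy to verify without any network access; the hostnames below are arbitrary examples.

assert get_sub_domain_name("https://sub.example.com/path") == "sub.example.com"
assert get_domain_name("https://sub.example.com/path") == "example.com"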
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 81 |
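Given the deprecation above, new code should construct the image processor directly; a minimal sketch:

from transformers import DeiTImageProcessor

processor = DeiTImageProcessor()  # drop-in replacement for DeiTFeatureExtractor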
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_A = logging.get_logger(__name__)
_A = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
_A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
_A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs)
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(self, preprocessor, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework=None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 431 | 0 |
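A quick usage check for the configuration class above; instantiating it with no arguments exercises the defaults shown in the signature (this assumes transformers is installed).

config = WhisperConfig()
print(config.model_type, config.d_model, config.encoder_layers)  # -> whisper 256 6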
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 713 |
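A small usage sketch for get_scheduler above, with a throwaway model and optimizer; the step counts are arbitrary examples.

import torch

model = torch.nn.Linear(2, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(5):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())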
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
| 144 | 0 |
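A hedged usage sketch for the pipeline class above via the high-level factory; the OWL-ViT checkpoint and the COCO image URL are common choices but assumptions here, and the call downloads model weights.

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
preds = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
print(preds[0]["label"], round(preds[0]["score"], 3))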
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 299 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
# YOLOS does not use inputs_embeds
pass
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = model_class(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Dict = True
# in YOLOS, the seq_len is different
lowerCAmelCase__ : Any = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Tuple = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : str = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : int = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCAmelCase__ : int = len(UpperCamelCase )
# Check attention is always last and order is fine
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Dict = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) )
lowerCAmelCase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
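        # With hidden states requested on top of attentions, the output tuple grows by exactly
        # one entry, hence the `out_len + added_hidden_states` check above.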
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ):
lowerCAmelCase__ : Dict = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = outputs.hidden_states
lowerCAmelCase__ : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# YOLOS has a different seq_length
lowerCAmelCase__ : List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase )
@slow
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : List[Any] = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(inputs.pixel_values )
# verify outputs
        lowerCAmelCase__ : Union[str, Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCAmelCase__ : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=UpperCamelCase , )
lowerCAmelCase__ : Any = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase , atol=1E-4 ) )
# verify postprocessing
lowerCAmelCase__ : Optional[int] = image_processor.post_process_object_detection(
UpperCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCAmelCase__ : Tuple = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(UpperCamelCase )
lowerCAmelCase__ : Tuple = [75, 75, 17, 63, 17]
lowerCAmelCase__ : Tuple = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(UpperCamelCase )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , UpperCamelCase , atol=1E-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , UpperCamelCase )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCamelCase ) )
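        # `post_process_object_detection` turns the raw (logits, pred_boxes) pair into
        # thresholded detections, using `target_sizes` to rescale the normalized boxes back
        # to absolute pixel coordinates in the original image.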
| 299 | 1 |
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Return the maximum element of nums[left:right + 1] via divide and conquer.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
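# Worked example (illustrative): find_max([2, 9, 4, 7], 0, 3) splits at mid = 1, recursing
# into find_max(nums, 0, 1) -> 9 and find_max(nums, 2, 3) -> 7, then returns 9. Each element
# is visited exactly once across the recursion tree, so the running time is O(n) despite the
# divide-and-conquer structure.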
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 150 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase = """\
Text data.
Second line of data."""
lowercase = """file"""
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def A__ ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
snake_case__ : str = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
snake_case__ : List[str] = input_paths[compression_format]
snake_case__ : List[str] = tmp_path / "cache"
snake_case__ : Tuple = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
snake_case__ : Any = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
snake_case__ : str = f.read()
with open(_UpperCAmelCase ) as f:
snake_case__ : List[Any] = f.read()
assert extracted_file_content == expected_file_content
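# The test above exercises the transparent-decompression path: with
# `extract_compressed_file=True`, `cached_path` extracts gzip/xz/zstd archives into the
# cache and returns the path of the extracted file rather than the compressed one.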
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def A__ ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = "custom_cache"
snake_case__ : Any = "custom_extracted_dir"
snake_case__ : List[str] = tmp_path / "custom_extracted_path"
if default_extracted:
snake_case__ : Tuple = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _UpperCAmelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_UpperCAmelCase ) )
snake_case__ : Optional[int] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case__ : List[Any] = xz_file
snake_case__ : Union[str, Any] = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
snake_case__ : List[Any] = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( ) -> Dict:
'''simple docstring'''
with pytest.raises(_UpperCAmelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : int = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
http_get("https://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Dict = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head("s3://huggingface.co" )
| 150 | 1 |
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """
    Warp `img` with the affine transform that maps the three points pts1 onto pts2.
    """
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))
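# `cv2.getAffineTransform` solves for the 2x3 matrix M that maps three source points onto
# three destination points; `cv2.warpAffine` then applies M to every pixel. A minimal usage
# sketch (point values are illustrative, not from the original script):
#   src = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
#   dst = np.array([[0, 0], [0, 1], [-1, 0]], np.float32)  # 90-degree rotation about origin
#   out = get_rotation(img, src, dst, *img.shape[:2])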
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list (this source/destination pairing is one plausible
    # reconstruction; the original pairing was not preserved by the mangled source)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 300 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : Any = 3
_UpperCAmelCase : List[Any] = (32, 32)
_UpperCAmelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : int = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
def extract(*A_ , **A_ ):
class a :
def __init__( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = torch.ones([0] )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
self.pixel_values.to(A_ )
return self
return Out()
return extract
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : List[str] = PNDMScheduler(skip_prk_steps=A_ )
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Any = self.dummy_text_encoder
_UpperCAmelCase : Tuple = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_UpperCAmelCase : Dict = 77
_UpperCAmelCase : Optional[int] = self.dummy_image.to(A_ )
_UpperCAmelCase : List[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Tuple = AltDiffusionImgaImgPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
_UpperCAmelCase : str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A_ )
_UpperCAmelCase : Dict = alt_pipe.to(A_ )
alt_pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.Generator(device=A_ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = alt_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=A_ , )
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Tuple = torch.Generator(device=A_ ).manual_seed(0 )
_UpperCAmelCase : Dict = alt_pipe(
[prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=A_ , return_dict=A_ , )[0]
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : int = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
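        # Re-seeding the generator before the second call makes both pipeline invocations
        # deterministic, so the `return_dict=False` tuple output can be compared
        # slice-for-slice with the regular output.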
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = self.dummy_cond_unet
_UpperCAmelCase : Any = PNDMScheduler(skip_prk_steps=A_ )
_UpperCAmelCase : Optional[Any] = self.dummy_vae
_UpperCAmelCase : Any = self.dummy_text_encoder
_UpperCAmelCase : Dict = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_UpperCAmelCase : Any = 77
_UpperCAmelCase : List[Any] = self.dummy_image.to(A_ )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : Optional[Any] = vae.half()
_UpperCAmelCase : int = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Optional[int] = AltDiffusionImgaImgPipeline(
unet=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , safety_checker=A_ , feature_extractor=self.dummy_extractor , )
_UpperCAmelCase : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A_ )
_UpperCAmelCase : List[str] = alt_pipe.to(A_ )
alt_pipe.set_progress_bar_config(disable=A_ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : int = alt_pipe(
[prompt] , generator=A_ , num_inference_steps=2 , output_type="np" , image=A_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase : Dict = init_image.resize((760, 504) )
_UpperCAmelCase : str = "BAAI/AltDiffusion"
_UpperCAmelCase : str = AltDiffusionImgaImgPipeline.from_pretrained(
A_ , safety_checker=A_ , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Union[str, Any] = "A fantasy landscape, trending on artstation"
_UpperCAmelCase : int = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , generator=A_ , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = output.images[0]
_UpperCAmelCase : Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_UpperCAmelCase : Optional[int] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
_UpperCAmelCase : Optional[int] = init_image.resize((768, 512) )
_UpperCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
_UpperCAmelCase : int = "BAAI/AltDiffusion"
_UpperCAmelCase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
A_ , safety_checker=A_ , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_UpperCAmelCase : int = "A fantasy landscape, trending on artstation"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : int = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , generator=A_ , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 300 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : str = '▁'
_UpperCamelCase : str = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_UpperCamelCase : Optional[Any] = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_UpperCamelCase : List[str] = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_UpperCamelCase : Optional[Any] = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class snake_case ( UpperCAmelCase ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = []
__magic_name__ = []
def __init__( self : str , A : Union[str, Any] , A : Any , A : List[str]=None , A : Tuple=None , A : Optional[Any]="<s>" , A : List[str]="</s>" , A : Tuple="</s>" , A : Dict="<pad>" , A : List[str]="<unk>" , A : List[str]="m2m100" , A : Optional[Dict[str, Any]] = None , A : Tuple=8 , **A : Union[str, Any] , ):
'''simple docstring'''
a : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
a : Any = language_codes
a : Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes]
a : Optional[Any] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
a : Tuple = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A )
for lang_code in fairseq_language_code
if self.get_lang_token(A ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A , tgt_lang=A , bos_token=A , eos_token=A , sep_token=A , unk_token=A , pad_token=A , language_codes=A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A , **A , )
a : Union[str, Any] = vocab_file
a : str = load_json(A )
a : Dict = {v: k for k, v in self.encoder.items()}
a : Tuple = spm_file
a : List[str] = load_spm(A , self.sp_model_kwargs )
a : Optional[Any] = len(self.encoder )
a : Tuple = {
self.get_lang_token(A ): self.encoder_size + i for i, lang_code in enumerate(A )
}
a : Tuple = {lang_code: self.encoder_size + i for i, lang_code in enumerate(A )}
a : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
a : Tuple = src_lang if src_lang is not None else 'en'
a : Union[str, Any] = tgt_lang
a : Optional[int] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
a : Optional[Any] = num_madeup_words
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self : Optional[int] , A : str ):
'''simple docstring'''
a : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ ( self : Optional[Any] , A : str ):
'''simple docstring'''
return self.sp_model.encode(A , out_type=A )
def lowerCamelCase__ ( self : Tuple , A : Dict ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A , self.encoder[self.unk_token] )
def lowerCamelCase__ ( self : str , A : int ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A , self.unk_token )
def lowerCamelCase__ ( self : List[str] , A : Tuple ):
'''simple docstring'''
a : Optional[Any] = []
a : Dict = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
a : List[Any] = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def lowerCamelCase__ ( self : List[Any] , A : List[int] , A : Optional[List[int]] = None , A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
a : List[str] = [1] * len(self.prefix_tokens )
a : int = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A )) + suffix_ones
return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def lowerCamelCase__ ( self : Any , A : List[int] , A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
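    # The resulting M2M100 sequence layout is: __src_lang__ <tokens> </s> -- the language
    # code is prepended as the prefix token and </s> is appended as the suffix token.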
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : List[str] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
a : Optional[int] = self.__dict__.copy()
a : Optional[Any] = None
return state
def __setstate__( self : Any , A : Dict ):
'''simple docstring'''
a : Tuple = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a : str = {}
a : Tuple = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCamelCase__ ( self : Optional[Any] , A : str , A : Optional[str] = None ):
'''simple docstring'''
a : Tuple = Path(A )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
a : List[str] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
a : Dict = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A )
if os.path.abspath(self.spm_file ) != os.path.abspath(A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A )
elif not os.path.isfile(self.spm_file ):
with open(A , 'wb' ) as fi:
a : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (str(A ), str(A ))
def lowerCamelCase__ ( self : int , A : List[str] , A : str = "en" , A : Optional[List[str]] = None , A : str = "ro" , **A : List[Any] , ):
'''simple docstring'''
a : Optional[int] = src_lang
a : Dict = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A , A , **A )
def lowerCamelCase__ ( self : List[Any] , A : str , A : Optional[str] , A : Optional[str] , **A : Union[str, Any] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
a : List[str] = src_lang
a : str = self(A , add_special_tokens=A , **A )
a : Optional[Any] = self.get_lang_id(A )
a : Optional[Any] = tgt_lang_id
return inputs
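    # The target-language id computed above is attached to the encoded inputs so generation
    # can force it as the first decoder token (`forced_bos_token_id`), which is how M2M100
    # selects the output language.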
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Optional[Any] , A : str ):
'''simple docstring'''
a : Dict = self.get_lang_token(A )
a : str = self.lang_token_to_id[lang_token]
a : int = [self.cur_lang_id]
a : List[str] = [self.eos_token_id]
def lowerCamelCase__ ( self : Any , A : str ):
'''simple docstring'''
a : Optional[int] = self.get_lang_token(A )
a : List[str] = self.lang_token_to_id[lang_token]
a : List[Any] = [self.cur_lang_id]
a : Union[str, Any] = [self.eos_token_id]
def lowerCamelCase__ ( self : int , A : str ):
'''simple docstring'''
return self.lang_code_to_token[lang]
def lowerCamelCase__ ( self : List[Any] , A : str ):
'''simple docstring'''
a : Optional[Any] = self.get_lang_token(A )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, 'r') as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
| 118 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings: combines the image and text embeddings into a format
    usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
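# In short, CLIP image embeddings enter the unCLIP decoder along two routes: added onto the
# timestep embedding (additive_clip_time_embeddings) and projected into a handful of extra
# cross-attention tokens that are prepended to the text encoder hidden states.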
| 118 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , ) -> List[str]:
__lowerCamelCase : Optional[int] = parent
__lowerCamelCase : str = batch_size
__lowerCamelCase : List[Any] = image_size
__lowerCamelCase : Any = patch_size
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : Dict = is_training
__lowerCamelCase : List[str] = use_labels
__lowerCamelCase : Optional[Any] = hidden_size
__lowerCamelCase : int = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : Optional[int] = hidden_act
__lowerCamelCase : List[str] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : Optional[Any] = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : Union[str, Any] = scope
__lowerCamelCase : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCamelCase : List[Any] = (image_size // patch_size) ** 2
__lowerCamelCase : Optional[int] = num_patches + 2
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : List[Any] = None
if self.use_labels:
__lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> Tuple:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Union[str, Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[Any] = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase : Dict = self.type_sequence_label_size
__lowerCamelCase : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : Tuple = 1
__lowerCamelCase : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = config_and_inputs
__lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase : List[str] = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase : Optional[int] = False
lowerCamelCase : List[Any] = False
lowerCamelCase : List[str] = False
lowerCamelCase : Any = False
def lowercase_ ( self ) -> str:
__lowerCamelCase : Any = TFDeiTModelTester(self )
__lowerCamelCase : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def lowercase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
def lowercase_ ( self ) -> int:
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Any = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCamelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Dense ) )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Tuple = [*signature.parameters.keys()]
__lowerCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Dict:
__lowerCamelCase : Optional[int] = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase_ ( self ) -> Dict:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Tuple = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> str:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
__lowerCamelCase : Union[str, Any] = self.default_image_processor
__lowerCamelCase : Dict = prepare_img()
__lowerCamelCase : Dict = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='tf' )
# forward pass
__lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
        __lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
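        # The expected values are a recorded reference slice of the logits; the 1e-4
        # tolerance absorbs minor numerical differences across frameworks and hardware.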
| 13 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
# TODO Update this
A__ : Tuple = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'esm'
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_26 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : str = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : Any = intermediate_size
__lowerCamelCase : Optional[Any] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : Optional[int] = max_position_embeddings
__lowerCamelCase : str = initializer_range
__lowerCamelCase : Optional[int] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : int = use_cache
__lowerCamelCase : Optional[Any] = emb_layer_norm_before
__lowerCamelCase : Optional[Any] = token_dropout
__lowerCamelCase : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
__lowerCamelCase : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
__lowerCamelCase : List[str] = get_default_vocab_list()
else:
__lowerCamelCase : Optional[Any] = vocab_list
else:
__lowerCamelCase : Dict = None
__lowerCamelCase : Optional[Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , SCREAMING_SNAKE_CASE_ ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : Any = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : int = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str = None
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : float = 0
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : int = 1_2_8
lowerCamelCase : "TrunkConfig" = None
def lowercase_ ( self ) -> Any:
if self.trunk is None:
__lowerCamelCase : List[str] = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Any = TrunkConfig(**self.trunk )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Optional[int] = asdict(self )
__lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : int = 4_8
lowerCamelCase : int = 1_0_2_4
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 3_2
lowerCamelCase : int = 3_2
lowerCamelCase : int = 3_2
lowerCamelCase : float = 0
lowerCamelCase : float = 0
lowerCamelCase : bool = False
lowerCamelCase : int = 4
lowerCamelCase : Optional[int] = 1_2_8
lowerCamelCase : "StructureModuleConfig" = None
def lowercase_ ( self ) -> Optional[int]:
if self.structure_module is None:
__lowerCamelCase : Dict = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
        # Note: comparing a value against itself made these checks vacuous (x % x == 0);
        # the intended divisibility check is against the corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
__lowerCamelCase : Tuple = self.sequence_state_dim // self.sequence_head_width
__lowerCamelCase : str = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should be less than 0.4, got {self.dropout}.' )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : List[str] = asdict(self )
__lowerCamelCase : int = self.structure_module.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : int = 3_8_4
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 1_6
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 1_2
lowerCamelCase : int = 4
lowerCamelCase : int = 8
lowerCamelCase : float = 0.1
lowerCamelCase : int = 8
lowerCamelCase : int = 1
lowerCamelCase : int = 2
lowerCamelCase : int = 7
lowerCamelCase : int = 1_0
lowerCamelCase : float = 1e-8
lowerCamelCase : float = 1e5
def lowercase_ ( self ) -> Any:
return asdict(self )
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
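# The default vocabulary mirrors the ESM alphabet: control tokens first, then the twenty
# standard amino acids, the ambiguous/rare codes (X, B, U, Z, O), the gap characters "."
# and "-", and finally <null_1> and <mask>.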
| 13 | 1 |
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 if `number` and `number + 2` are both prime (a twin prime pair),
    otherwise return -1.
    """
    if not isinstance(number, int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
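# Illustrative behaviour: twin_prime(3) -> 5 (3 and 5 are twin primes), twin_prime(5) -> 7,
# twin_prime(4) -> -1 (4 is not prime), and twin_prime("7") raises TypeError.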
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand ( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser: ArgumentParser ) -> None:
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be downloaded even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model: str , cache: str , force: bool , trust_remote_code: bool ) -> None:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ) -> None:
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
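# Typical invocation (illustrative model name): `transformers-cli download bert-base-uncased
# --cache-dir ./models` fetches both the model weights and the matching tokenizer into the
# given cache directory.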
| 20 | 0 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBird QA module with an extra classification head on top for predicting the answer
    category; this way its weights stay loadable with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ):
    def cross_entropy(logits , labels , reduction=None ):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype("f4" )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss

    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    '''simple docstring'''

    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_workers: int = 3  # field name assumed; only the default value was recoverable
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__( self ) -> None:
        '''simple docstring'''
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    '''simple docstring'''

    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__( self , batch ) -> List[Any]:
        '''simple docstring'''
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch

    def collate_fn( self , features ) -> Any:
        '''simple docstring'''
        input_ids , attention_mask = self.fetch_inputs(features["input_ids"] )
        batch = {
            "input_ids": jnp.array(input_ids , dtype=jnp.int32 ),
            "attention_mask": jnp.array(attention_mask , dtype=jnp.int32 ),
            "start_labels": jnp.array(features["start_token"] , dtype=jnp.int32 ),
            "end_labels": jnp.array(features["end_token"] , dtype=jnp.int32 ),
            "pooled_labels": jnp.array(features["category"] , dtype=jnp.int32 ),
        }
        return batch

    def fetch_inputs( self , input_ids: list ) -> Optional[int]:
        '''simple docstring'''
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )

    def _fetch_inputs( self , input_ids: list ) -> Optional[int]:
        '''simple docstring'''
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
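# Simple epoch iterator: optional reshuffle, then fixed-size dict batches (remainder dropped).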
def get_batched_dataset(dataset , batch_size , seed=None ):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
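# One pmap-ed optimizer step: pop the label fields, compute the 3-part QA loss,
# and all-reduce loss/grads across devices before applying gradients.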
@partial(jax.pmap , axis_name="batch" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
def loss_fn(lowerCAmelCase__ ):
UpperCAmelCase_ = model_inputs.pop("start_labels" )
UpperCAmelCase_ = model_inputs.pop("end_labels" )
UpperCAmelCase_ = model_inputs.pop("pooled_labels" )
UpperCAmelCase_ = state.apply_fn(**lowerCAmelCase__ , params=lowerCAmelCase__ , dropout_rng=lowerCAmelCase__ , train=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = outputs
return state.loss_fn(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
UpperCAmelCase_ , UpperCAmelCase_ = jax.random.split(lowerCAmelCase__ )
UpperCAmelCase_ = jax.value_and_grad(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = grad_fn(state.params )
UpperCAmelCase_ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
UpperCAmelCase_ = jax.lax.pmean(lowerCAmelCase__ , "batch" )
UpperCAmelCase_ = state.apply_gradients(grads=lowerCAmelCase__ )
return state, metrics, new_drp_rng
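# Forward-only counterpart of train_step, used during evaluation.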
@partial(jax.pmap , axis_name="batch" )
def a__ ( lowerCAmelCase__ , **lowerCAmelCase__ ):
UpperCAmelCase_ = model_inputs.pop("start_labels" )
UpperCAmelCase_ = model_inputs.pop("end_labels" )
UpperCAmelCase_ = model_inputs.pop("pooled_labels" )
UpperCAmelCase_ = state.apply_fn(**lowerCAmelCase__ , params=state.params , train=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = outputs
UpperCAmelCase_ = state.loss_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class TrainState(train_state.TrainState ):
    '''simple docstring'''

    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
    '''simple docstring'''

    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state( self , model , tx , num_train_steps , ckpt_dir=None ) -> Union[str, Any]:
        '''simple docstring'''
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params , opt_state , step , args , data_collator = restore_checkpoint(ckpt_dir , state )
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx , lr = build_tx(**tx_args )
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state )
        return state
    def train( self , state , tr_dataset , val_dataset ) -> Optional[int]:
        '''simple docstring'''
        args = self.args
        total = len(tr_dataset ) // args.batch_size
        rng = jax.random.PRNGKey(0 )
        drp_rng = jax.random.split(rng , jax.device_count() )
        for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch )
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=F"""Running EPOCH-{epoch}""" ):
                batch = self.data_collator(batch )
                state , metrics , drp_rng = self.train_step_fn(state , drp_rng , **batch )
                running_loss += jax_utils.unreplicate(metrics["loss"] )
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step )
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1 )
                    eval_loss = self.evaluate(state , val_dataset )
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict ) )
                    self.logger.log(logging_dict , commit=True )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=state )
    def evaluate( self , state , dataset ) -> Union[str, Any]:
        '''simple docstring'''
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc="Evaluating ... " ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics["loss"] )
            i += 1
        return running_loss / i
    def save_checkpoint( self , save_dir , state ) -> str:
        '''simple docstring'''
        state = jax_utils.unreplicate(state )
        print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " )
        self.model_save_fn(save_dir , params=state.params )
        with open(os.path.join(save_dir , "opt_state.msgpack" ) , "wb" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , "args.joblib" ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , "data_collator.joblib" ) )
        with open(os.path.join(save_dir , "training_state.json" ) , "w" ) as f:
            json.dump({"step": state.step.item()} , f )
        print("DONE" )
def restore_checkpoint( save_dir , state ):
    print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " )
    with open(os.path.join(save_dir , "flax_model.msgpack" ) , "rb" ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , "opt_state.msgpack" ) , "rb" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , "args.joblib" ) )
    data_collator = joblib.load(os.path.join(save_dir , "data_collator.joblib" ) )
    with open(os.path.join(save_dir , "training_state.json" ) , "r" ) as f:
        training_state = json.load(f )
    step = training_state["step"]
    print("DONE" )
    return params, opt_state, step, args, data_collator
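# Learning-rate schedule: linear warmup from init_lr to lr, then linear decay toward zero.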
def scheduler_fn( lr , init_lr , warmup_steps , num_train_steps ):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1e-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
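# AdamW with weight decay masked off for bias and LayerNorm scale parameters.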
def build_tx( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    def weight_decay_mask(params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask )

    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
| 82 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
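# Fast (Rust-backed) tokenizer for GPT-NeoX-20B, built on a GPT-2-style byte-level BPE.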
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}
class _lowercase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation: "Conversation" ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 417 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
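# Dataset of pre-tokenized sequences for distillation: cleaned on load
# (length/unknown-token filters), then padded per batch by `batch_sequences`.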
class _A ( Dataset ):
    '''simple docstring'''

    def __init__( self , params , data ):
        '''simple docstring'''
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
    def check( self ):
        '''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self ):
        '''simple docstring'''
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(F"""Splitting {sum(idx )} too long sequences.""" )

        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id , sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self ):
        '''simple docstring'''
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
    def remove_unknown_sequences( self ):
        '''simple docstring'''
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
    def print_statistics( self ):
        '''simple docstring'''
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self , batch ):
        '''simple docstring'''
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
| 315 |
from collections import namedtuple
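# Factors relative to one cubic metre: `from_` converts a unit into m^3, `to` converts m^3 back out.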
from_to = namedtuple('''from_to''', '''from_ to''')
METRIC_CONVERSION = {
    '''cubicmeter''': from_to(1, 1),
    '''litre''': from_to(0.001, 1000),
    '''kilolitre''': from_to(1, 1),
    '''gallon''': from_to(0.00454, 264.172),
    '''cubicyard''': from_to(0.76455, 1.30795),
    '''cubicfoot''': from_to(0.028, 35.3147),
    '''cup''': from_to(0.000236588, 4226.75),
}
def lowercase ( value: float , from_type: str , to_type: str ) -> float:
    '''simple docstring'''
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + """, """.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + """, """.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ) -> Optional[Any]:
        """simple docstring"""
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ) -> Dict:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ) -> List[Any]:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ) -> Tuple:
        """simple docstring"""
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer( self ) -> str:
        """simple docstring"""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )

    @cached_property
    def default_tokenizer_fast( self ) -> Tuple:
        """simple docstring"""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
    @require_torch
    def test_prepare_batch( self ) -> Union[str, Any]:
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors="pt" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
    @require_torch
    def test_prepare_batch_empty_target_text( self ) -> Union[str, Any]:
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors="pt" )
            self.assertIn("input_ids" , batch )
            self.assertIn("attention_mask" , batch )
            self.assertNotIn("labels" , batch )
            self.assertNotIn("decoder_attention_mask" , batch )
    @require_torch
    def test_tokenizer_as_target_length( self ) -> Optional[Any]:
        """simple docstring"""
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen( self ) -> List[str]:
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"] , padding=True , truncation=True , return_tensors="pt" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
    @require_torch
    def test_special_tokens( self ) -> Any:
        """simple docstring"""
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors="pt" )
            targets = tokenizer(text_target=tgt_text , return_tensors="pt" )
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def test_global_attention_mask( self ) -> Tuple:
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output["global_attention_mask"] = [[0] * len(x ) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs["global_attention_mask"] , expected_global_attention_mask )
    def test_pretokenized_inputs( self ) -> int:  # test name assumed
        """simple docstring"""
        pass
    def test_embeded_special_tokens( self ) -> str:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    tokens_r_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 6 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
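# Brute-force nearest-neighbour search under Euclidean distance, plus cosine similarity.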
def euclidean(input_a , input_b ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )


def similarity_search(dataset , value_array ):
    if dataset.ndim != value_array.ndim:
        msg = (
            '''Wrong input data\'s dimensions... '''
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                '''Wrong input data\'s shape... '''
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('''Wrong shape''' )
    if dataset.dtype != value_array.dtype:
        msg = (
            '''Input data have different datatype... '''
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer


def cosine_similarity(input_a , input_b ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
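# Slow integration tests: compare Flax UNet outputs on fixed latents/seeds against
# reference slices recorded from the PyTorch implementation.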
@slow
@require_flax
class a__ ( unittest.TestCase ):
    """simple docstring"""

    def get_file_format( self , seed , shape ) -> Any:
        '''simple docstring'''
        return F'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'

    def tearDown( self ) -> List[Any]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()

    def get_latents( self , seed=0 , shape=(4, 4, 64, 64) , fp16=False ) -> List[Any]:
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image

    def get_unet_model( self , fp16=False , model_id="CompVis/stable-diffusion-v1-4" ) -> Tuple:
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model , params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder="unet" , dtype=dtype , revision=revision )
        return model, params

    def get_encoder_hidden_states( self , seed=0 , shape=(4, 77, 768) , fp16=False ) -> List[Any]:
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self , seed , timestep , expected_slice ) -> List[Any]:  # test name assumed
        '''simple docstring'''
        model , params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fp16=True )
        latents = self.get_latents(seed , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fp16=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self , seed , timestep , expected_slice ) -> str:  # test name assumed
        '''simple docstring'''
        model , params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fp16=True )
        latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 1024) , fp16=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
| 710 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class a__ ( CLIPImageProcessor ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 626 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_a : Tuple = logging.get_logger(__name__)
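# Generic ImageNet-style preprocessor: shortest-edge resize, center crop, rescale,
# normalize; also post-processes segmentation logits back to per-pixel class maps.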
class a_ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__( self , do_resize: bool = True , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes: List[Tuple] = None ):
        """simple docstring"""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 598 |
"""simple docstring"""
def molarity_to_normality( moles: float , volume: float , nfactor: int ) -> float:
    """simple docstring"""
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure( volume: float , moles: float , temperature: float ) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume( pressure: float , moles: float , temperature: float ) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature( pressure: float , moles: float , volume: float ) -> float:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 610 | 0 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_A = logging.get_logger(__name__)
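# Document-image preprocessor: resizes pages, optionally runs Tesseract OCR, and
# normalizes word boxes to a 0-1000 coordinate grid.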
def normalize_box(box , width , height ) -> Any:
    '''simple docstring'''
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract(image , lang , tesseract_config=None ) -> Union[str, Any]:
    '''simple docstring'''
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words , left , top , width , height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class __UpperCAmelCase ( BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , apply_ocr: bool = True , ocr_lang: Optional[str] = None , tesseract_config: str = "" , **kwargs , )-> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = (size["height"], size["width"])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , apply_ocr: bool = None , ocr_lang: Optional[str] = None , tesseract_config: Optional[str] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , )-> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , "pytesseract" )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={"pixel_values": images} , tensor_type=return_tensors )
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 700 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_A = TypeVar("T")
class LRUCache( Generic[T] ):
    """simple docstring"""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self , n: int )-> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0." )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x: T )-> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self )-> None:
        for k in self.dq_store:
            print(k )

    def __repr__( self )-> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 228 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
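# Configuration for LLaMA models; the defaults below match the 7B variant.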
class a__ ( PretrainedConfig ):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) -> str:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ) -> Union[str, Any]:
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '''
                F'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 227 |
"""simple docstring"""
import argparse
import datetime
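# Zeller's congruence: computes the weekday for a Gregorian date given as mm-dd-yyyy.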
def zeller( date_input: str ) -> str:
    days = {
        '''0''': '''Sunday''',
        '''1''': '''Monday''',
        '''2''': '''Tuesday''',
        '''3''': '''Wednesday''',
        '''4''': '''Thursday''',
        '''5''': '''Friday''',
        '''6''': '''Saturday''',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('''Must be 10 characters long''' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('''Month must be between 1 - 12''' )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''' )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            '''Year out of range. There has to be some sort of limit...right?''' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
    # Response
    response = F'Your date {date_input}, is a {days[str(f )]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
| 227 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 714 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
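# Smoke test: fine-tune a tiny BERT2BERT encoder-decoder on 1% of CNN/DailyMail
# through Seq2SeqTrainer with generation-based evaluation.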
class _UpperCAmelCase ( TestCasePlus ):
    @slow
    @require_torch
    def test_finetune_bert2bert( self ) -> List[Any]:  # test name assumed
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
        val_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'] , padding='max_length' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['highlights'] , padding='max_length' , truncation=True , max_length=128 )
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch

        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        train_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        val_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='steps' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 183 | 0 |
"""A binary search tree implementation."""

from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: Any = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root

        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None

        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an inorder traversal and append values to a list."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
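# --- Added usage sketch for find_kth_smallest (assumes the BinarySearchTree
# class above; values are the same test list used in binary_search_tree()):
t = BinarySearchTree()
t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
assert t.find_kth_smallest(1, t.root) == 1  # smallest value
assert t.find_kth_smallest(5, t.root) == 7  # 5th value in sorted order: 1, 3, 4, 6, 7, ...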
| 565 |
"""ESM model configuration."""

from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K",
        "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O",
        ".", "-", "<null_1>", "<mask>",
    )
| 565 | 1 |
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
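# --- Added sanity checks: the ugly-number sequence begins
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
assert ugly_numbers(1) == 1
assert ugly_numbers(7) == 8
assert ugly_numbers(10) == 12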
| 719 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 185 | 0 |
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
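# --- Added worked example of the word-value rule used above: "SKY" scores
# 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the 10th triangular number.
assert sum(ord(ch) - 64 for ch in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS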
| 151 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {}
class a__ ( UpperCamelCase_ ):
snake_case__ = '''llama'''
snake_case__ = ['''past_key_values''']
def __init__( self : str ,a__ : Union[str, Any]=3_2000 ,a__ : Any=4096 ,a__ : int=1_1008 ,a__ : int=32 ,a__ : Optional[Any]=32 ,a__ : List[Any]=None ,a__ : List[Any]="silu" ,a__ : Union[str, Any]=2048 ,a__ : Any=0.02 ,a__ : Any=1E-6 ,a__ : int=True ,a__ : Optional[int]=0 ,a__ : Any=1 ,a__ : Any=2 ,a__ : str=1 ,a__ : str=False ,a__ : Union[str, Any]=None ,**a__ : List[Any] ,) -> str:
"""simple docstring"""
_lowerCAmelCase:Tuple = vocab_size
_lowerCAmelCase:Optional[int] = max_position_embeddings
_lowerCAmelCase:int = hidden_size
_lowerCAmelCase:Dict = intermediate_size
_lowerCAmelCase:List[Any] = num_hidden_layers
_lowerCAmelCase:List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase:List[Any] = num_attention_heads
_lowerCAmelCase:Any = num_key_value_heads
_lowerCAmelCase:Union[str, Any] = hidden_act
_lowerCAmelCase:int = initializer_range
_lowerCAmelCase:Any = rms_norm_eps
_lowerCAmelCase:Optional[Any] = pretraining_tp
_lowerCAmelCase:str = use_cache
_lowerCAmelCase:Union[str, Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a__ ,bos_token_id=a__ ,eos_token_id=a__ ,tie_word_embeddings=a__ ,**a__ ,)
def __UpperCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,a__) or len(self.rope_scaling) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
F'got {self.rope_scaling}')
_lowerCAmelCase:Optional[Any] = self.rope_scaling.get('''type''' ,a__)
_lowerCAmelCase:Any = self.rope_scaling.get('''factor''' ,a__)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
if rope_scaling_factor is None or not isinstance(a__ ,a__) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
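# --- Added standalone sketch of the rope_scaling check above (names are
# illustrative; the real validation lives in LlamaConfig._rope_scaling_validation):
def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with `type` and `factor`, got {rope_scaling}")
    if rope_scaling.get("type") not in ("linear", "dynamic"):
        raise ValueError("`rope_scaling`'s type field must be 'linear' or 'dynamic'")
    factor = rope_scaling.get("factor")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")


validate_rope_scaling({"type": "linear", "factor": 2.0})  # a valid 2x linear scaling passes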
| 227 | 0 |
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 712 |
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit("euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals())
        )
        print("With Numpy")
        print(
            timeit("euclidean_distance([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals())
        )

    benchmark()
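# --- Added cross-check of the two implementations on a 3-4-5 right triangle:
assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0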
| 677 | 0 |
import io
import json
import unittest

from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 90 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
| 520 | 0 |
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 677 |
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch

from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
| 677 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
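# --- Added example: the metric above is a thin wrapper around NLTK; the same
# score can be computed directly with gleu_score (already imported above):
_hyp = "the cat sat on the mat".split()
_ref = "the cat is on the mat".split()
_score = gleu_score.corpus_gleu(list_of_references=[[_ref]], hypotheses=[_hyp], min_len=1, max_len=4)
assert 0.0 <= _score <= 1.0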
| 71 | 0 |
from copy import deepcopy


class FenwickTree:
    """A Fenwick tree (binary indexed tree) supporting prefix sums and point updates."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
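# --- Added usage sketch (assumes the FenwickTree class above):
f = FenwickTree(arr=[1, 2, 3, 4, 5])
assert f.prefix(3) == 1 + 2 + 3    # sum of arr[0:3]
assert f.query(1, 4) == 2 + 3 + 4  # sum of arr[1:4]
f.add(2, 10)                       # arr[2] += 10
assert f.get(2) == 13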
| 704 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 155 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 467 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 467 | 1 |
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
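# --- Added worked checks: the count of lattice paths through an n x n grid is
# the central binomial coefficient C(2n, n); C(4, 2) = 6 and C(40, 20) = 137846528820.
assert solution(2) == 6
assert solution(20) == 137846528820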
| 473 |
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
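# --- Added usage sketch (assumes the Node / BinaryTreeNodeSum classes above):
tree = Node(10)
tree.left = Node(5)
tree.right = Node(-3)
assert next(iter(BinaryTreeNodeSum(tree))) == 12  # 10 + 5 - 3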
| 473 | 1 |
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
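
# Illustrative instantiation (shapes and cutoff values are hypothetical, not from
# the original file): with cutoffs=[2000, 10000] and vocab_size=30000, ids 0-1999
# fall in the head, 2000-9999 in the first tail cluster and 10000-29999 in the second.
#
#   softmax = TFAdaptiveSoftmaxMask(vocab_size=30000, d_embed=512, d_proj=512, cutoffs=[2000, 10000])
#   log_probs = softmax(hidden, target)  # hidden: [len, bsz, d_proj], target: [len, bsz] (assumed layout)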
| 234 |
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be tiled with unit squares
    and coloured tiles of length 2, 3 and 4."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
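
# Sanity check against the problem statement: a row of length 5 admits exactly
# 15 tilings, and the recurrence above reproduces it (solution(5) == 15).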
| 122 | 0 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 384 |
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Wraps an existing text config and adds the hidden size of the non-text
    (modal) encoder, copying all other attributes from the wrapped config."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
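
# Hypothetical usage sketch (model name and label count are placeholders):
#
#   text_config = BertConfig.from_pretrained("bert-base-uncased")
#   config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)
#   assert config.hidden_size == text_config.hidden_size  # attributes shared via __dict__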
| 384 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 236 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Basic consistency checks on the distillation arguments."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).", )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.", )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution.", )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.", )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.", )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O1", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            " See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it.")
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
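
# Illustrative invocation (paths and hyper-parameters are placeholders, not the
# official recipe); every flag used here is defined in the parser above:
#
#   python train.py --force --dump_path serialization_dir/my_run \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle \
#     --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 --freeze_pos_embs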
| 236 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 721 | """simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 104 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed weight names to transformers weight names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for tp_rank in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{tp_rank}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
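
# Illustrative invocation (paths and script name are placeholders; the flags
# match the parser defined above):
#
#   python convert_bloom_checkpoint.py --bloom_checkpoint_path /ckpts/bloom-megatron \
#     --pytorch_dump_folder_path ./bloom-converted --pretraining_tp 4 --shard_model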
| 247 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = CustomTokenizer
pass
| 247 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 703 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
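
# Worked consequence of the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio == 320, so 16 kHz audio yields
# 16000 / 320 == 50 encoder frames per second.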
| 72 | 0 |
from __future__ import annotations


class IIRFilter:
    """N-order IIR filter implementing the difference equation
    y[n] = (1/a_0) * (sum_i b_i * x[n-i] - sum_{j>=1} a_j * y[n-j])."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
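
# A quick impulse-response check (coefficients are illustrative, not from any
# filter-design method): y[n] = 0.5*x[n] + 0.5*x[n-1] is a 2-tap moving average.
#
#   flt = IIRFilter(1)
#   flt.set_coefficients([1.0, 0.0], [0.5, 0.5])
#   [flt.process(s) for s in (1.0, 0.0, 0.0)]  # -> [0.5, 0.5, 0.0]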
| 370 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator )}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
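# Usage sketch (illustrative; the checkpoint id is an assumption, not taken from this file):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images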
| 264 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase):
    """simple docstring"""
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DetaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , True )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
# prepare image and target
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.loads(f.read() )
_SCREAMING_SNAKE_CASE = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
_SCREAMING_SNAKE_CASE = DetaImageProcessor()
_SCREAMING_SNAKE_CASE = image_processing(images=A__ , annotations=A__ , return_tensors="""pt""" )
# verify pixel values
_SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A__ ) )
# verify boxes
_SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A__ , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A__ ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A__ ) )
# verify class_labels
_SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A__ ) )
# verify orig_size
_SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A__ ) )
# verify size
_SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
# prepare image, target and masks_path
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.loads(f.read() )
_SCREAMING_SNAKE_CASE = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
_SCREAMING_SNAKE_CASE = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_SCREAMING_SNAKE_CASE = DetaImageProcessor(format="""coco_panoptic""" )
_SCREAMING_SNAKE_CASE = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="""pt""" )
# verify pixel values
_SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A__ ) )
# verify boxes
_SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A__ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A__ , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A__ ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A__ ) )
# verify class_labels
_SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A__ ) )
# verify masks
_SCREAMING_SNAKE_CASE = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A__ )
# verify orig_size
_SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A__ ) )
# verify size
_SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A__ ) )
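# Standalone sketch (added, not part of the original tests) of the shortest-edge resize rule
# that `get_expected_values` checks: scale so the shorter side hits `shortest_edge`,
# preserving the aspect ratio, and return (height, width).
def _expected_resized_size(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge
# _expected_resized_size(30, 400) == (18, 240); a square input maps to (18, 18).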
| 0 |
'''simple docstring'''
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N ) -> int:
    """simple docstring"""
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
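# Equivalent sliding-window formulation (added for illustration, not in the original):
#   import math
#   max(math.prod(int(c) for c in N[i : i + 13]) for i in range(len(N) - 12))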
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 1 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_ ):
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ):
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
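# Usage sketch (illustrative): once registered on the diffusers CLI parser, the command is
# invoked as `diffusers-cli env` and prints the version table assembled in run() above.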
| 143 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores ) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores ) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class FlaxLogitsProcessorList( list ):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , cur_len , **kwargs ) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys() )} for "
                        f"{processor.__class__} are passed to the logits processor." )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class FlaxTemperatureLogitsWarper( FlaxLogitsWarper ):
def __init__( self , lowerCAmelCase_ ) -> Any:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not (temperature > 0):
raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
_SCREAMING_SNAKE_CASE : Tuple = temperature
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE : Optional[int] = scores / self.temperature
return scores
class FlaxTopPLogitsWarper( FlaxLogitsWarper ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = -float('Inf' ) , lowerCAmelCase_ = 1 ) -> int:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = top_p
_SCREAMING_SNAKE_CASE : Optional[int] = filter_value
_SCREAMING_SNAKE_CASE : Any = min_tokens_to_keep
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = lax.top_k(lowerCAmelCase_ , scores.shape[-1] )
_SCREAMING_SNAKE_CASE : Dict = jnp.full_like(lowerCAmelCase_ , self.filter_value )
_SCREAMING_SNAKE_CASE : int = jax.nn.softmax(lowerCAmelCase_ , axis=-1 ).cumsum(axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(lowerCAmelCase_ , 1 )
score_mask |= score_mask.at[:, 0].set(lowerCAmelCase_ )
# min tokens to keep
_SCREAMING_SNAKE_CASE : Optional[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Tuple = jnp.where(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Dict = jax.lax.sort_key_val(lowerCAmelCase_ , lowerCAmelCase_ )[-1]
return next_scores
class FlaxTopKLogitsWarper( FlaxLogitsWarper ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = -float('Inf' ) , lowerCAmelCase_ = 1 ) -> Tuple:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
_SCREAMING_SNAKE_CASE : Tuple = max(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = filter_value
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = scores.shape
_SCREAMING_SNAKE_CASE : int = jnp.full(batch_size * vocab_size , self.filter_value )
_SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = lax.top_k(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.broadcast_to((jnp.arange(lowerCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
_SCREAMING_SNAKE_CASE : List[Any] = topk_scores.flatten()
_SCREAMING_SNAKE_CASE : Dict = topk_indices.flatten() + shift
_SCREAMING_SNAKE_CASE : List[str] = next_scores_flat.at[topk_indices_flat].set(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : int = next_scores_flat.reshape(lowerCAmelCase_ , lowerCAmelCase_ )
return next_scores
class FlaxForcedBOSTokenLogitsProcessor( FlaxLogitsProcessor ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = bos_token_id
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.full(scores.shape , -float('inf' ) )
_SCREAMING_SNAKE_CASE : int = 1 - jnp.bool_(cur_len - 1 )
_SCREAMING_SNAKE_CASE : List[str] = jnp.where(lowerCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , lowerCAmelCase_ )
return scores
class FlaxForcedEOSTokenLogitsProcessor( FlaxLogitsProcessor ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = max_length
_SCREAMING_SNAKE_CASE : Any = eos_token_id
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE : List[Any] = jnp.full(scores.shape , -float('inf' ) )
_SCREAMING_SNAKE_CASE : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 )
_SCREAMING_SNAKE_CASE : Tuple = jnp.where(lowerCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , lowerCAmelCase_ )
return scores
class FlaxMinLengthLogitsProcessor( FlaxLogitsProcessor ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = min_length
_SCREAMING_SNAKE_CASE : str = eos_token_id
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
_SCREAMING_SNAKE_CASE : Tuple = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(lowerCAmelCase_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , lowerCAmelCase_ )
return scores
class FlaxSuppressTokensAtBeginLogitsProcessor( FlaxLogitsProcessor ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[str] = list(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = begin_index
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_SCREAMING_SNAKE_CASE : Any = 1 - jnp.bool_(cur_len - self.begin_index )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.where(lowerCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , lowerCAmelCase_ )
return scores
class FlaxSuppressTokensLogitsProcessor( FlaxLogitsProcessor ):
def __init__( self , lowerCAmelCase_ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : str = list(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
_SCREAMING_SNAKE_CASE : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class FlaxForceTokensLogitsProcessor( FlaxLogitsProcessor ):
def __init__( self , lowerCAmelCase_ ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[Any] = dict(lowerCAmelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
_SCREAMING_SNAKE_CASE : Dict = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
_SCREAMING_SNAKE_CASE : Dict = force_token_array.at[index].set(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : str = jnp.intaa(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> jnp.ndarray:
def _force_token(lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : str = scores.shape[0]
_SCREAMING_SNAKE_CASE : List[Any] = self.force_token_array[generation_idx]
_SCREAMING_SNAKE_CASE : Dict = jnp.ones_like(lowerCAmelCase_ , dtype=scores.dtype ) * -float('inf' )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
_SCREAMING_SNAKE_CASE : str = lax.dynamic_update_slice(lowerCAmelCase_ , lowerCAmelCase_ , (0, current_token) )
return new_scores
_SCREAMING_SNAKE_CASE : Optional[int] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowerCAmelCase_ ) , lambda: scores , ) , )
return scores
class FlaxWhisperTimeStampLogitsProcessor( FlaxLogitsProcessor ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_SCREAMING_SNAKE_CASE : Tuple = generate_config.eos_token_id
_SCREAMING_SNAKE_CASE : str = generate_config.no_timestamps_token_id
_SCREAMING_SNAKE_CASE : Optional[int] = generate_config.no_timestamps_token_id + 1
_SCREAMING_SNAKE_CASE : str = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCAmelCase_ , 'max_initial_timestamp_index' ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.max_initial_timestamp_index
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
_SCREAMING_SNAKE_CASE : Dict = model_config.vocab_size
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
# suppress <|notimestamps|> which is handled by without_timestamps
_SCREAMING_SNAKE_CASE : Optional[Any] = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : int = jnp.where((cur_len - self.begin_index) >= 1 , lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Tuple = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : Any = jnp.where((cur_len - self.begin_index) < 2 , lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowerCAmelCase_ , lowerCAmelCase_ , )
return jnp.where(
lowerCAmelCase_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(lowerCAmelCase_ )(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[str] = jnp.where(cur_len == self.begin_index , lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : Tuple = self.timestamp_begin + self.max_initial_timestamp_index
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(
lowerCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , lowerCAmelCase_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
_SCREAMING_SNAKE_CASE : str = jax.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
def handle_cumulative_probs(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : Any = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : Dict = jax.vmap(lowerCAmelCase_ )(lowerCAmelCase_ , lowerCAmelCase_ )
return scores
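# Standalone sketch (added, not part of the original file) of the top-k warp implemented by
# the top-k class above: keep the k largest logits per row and push the rest to filter_value.
# Ties at the cutoff are kept, a small simplification of the scatter-based version above.
def _top_k_warp_sketch(scores, top_k, filter_value=-float("inf")):
    topk_scores, _ = lax.top_k(scores, top_k)  # (batch, top_k), sorted descending
    cutoff = topk_scores[:, -1][:, None]  # smallest retained logit per row
    return jnp.where(scores < cutoff, filter_value, scores)
# _top_k_warp_sketch(jnp.array([[1.0, 3.0, 2.0, 0.0]]), top_k=2) -> [[-inf, 3.0, 2.0, -inf]]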
| 621 | 0 |
import re
def indian_phone_validator(phone: str ) -> bool:
    """simple docstring"""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
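    # Extra illustrative checks (added, not in the original file):
    print(indian_phone_validator("9876543210"))  # True: bare ten-digit number starting with 9
    print(indian_phone_validator("+91 123 4567"))  # False: too short and starts with 1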
| 707 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_graph_mode_with_inputs_embeds( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
        self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
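# Illustrative helper (added, not from the original tests): the patch arithmetic behind
# `seq_length` in TFViTModelTester above.
def _vit_seq_length(image_size, patch_size):
    return (image_size // patch_size) ** 2 + 1  # patches plus the [CLS] token
# With the tester defaults (image_size=30, patch_size=2) this gives 226; after halving the
# image to 15 it gives (15 // 2) ** 2 + 1 == 50, matching the interpolation checks above.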
| 199 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest ( unittest.TestCase ):
__lowerCamelCase : int = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] = (3, 32, 1_28)
a__ : Any = tempfile.mkdtemp()
# fmt: off
a__ : str = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
a__ : List[Any] = dict(zip(a_ , range(len(a_ ) ) ) )
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
a__ : Any = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 1_28},
}
a__ : Tuple = os.path.join(self.tmpdirname , a_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(a_ , a_ )
def UpperCAmelCase ( self : List[str] , **a_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : List[Any] , **a_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self : List[str] ) -> Any:
'''simple docstring'''
a__ : Any = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
a__ : List[str] = Image.fromarray(np.moveaxis(a_ , 0 , -1 ) )
return image_input
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] = self.get_tokenizer()
a__ : Any = self.get_image_processor()
a__ : Optional[int] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
processor.save_pretrained(self.tmpdirname )
a__ : Optional[Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=a_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , a_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
a__ : Optional[Any] = self.get_tokenizer()
a__ : Tuple = self.get_image_processor()
a__ : Optional[int] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
processor.save_pretrained(self.tmpdirname )
a__ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
a__ : Any = self.get_image_processor(do_normalize=a_ , padding_value=1.0 )
a__ : Dict = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , a_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
a__ : str = self.get_image_processor()
a__ : Union[str, Any] = self.get_tokenizer()
a__ : Any = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : List[str] = self.prepare_image_inputs()
a__ : List[Any] = image_processor(a_ , return_tensors="np" )
a__ : Optional[Any] = processor(images=a_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
a__ : Optional[Any] = self.get_image_processor()
a__ : List[Any] = self.get_tokenizer()
a__ : int = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : List[str] = "test"
a__ : Any = processor(text=a_ )
a__ : Tuple = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = self.get_image_processor()
a__ : List[str] = self.get_tokenizer()
a__ : List[Any] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : str = "test"
a__ : str = self.prepare_image_inputs()
a__ : Any = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
a__ : Any = self.get_image_processor()
a__ : Tuple = self.get_tokenizer()
a__ : Dict = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
a__ : Any = processor.char_decode(a_ )
a__ : str = tokenizer.batch_decode(a_ )
a__ : Union[str, Any] = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(a_ , a_ )
def UpperCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
a__ : List[str] = self.get_image_processor()
a__ : Any = self.get_tokenizer()
a__ : Any = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : str = None
a__ : Optional[Any] = self.prepare_image_inputs()
a__ : Optional[int] = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] = self.get_image_processor()
a__ : List[str] = self.get_tokenizer()
a__ : Optional[Any] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : List[str] = torch.randn(1 , 27 , 38 )
a__ : Tuple = torch.randn(1 , 27 , 5_02_57 )
a__ : List[Any] = torch.randn(1 , 27 , 3_05_22 )
a__ : Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] ) | 642 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> tuple[int, int]:
'''simple docstring'''
if b == 0:
return (1, 0)
((a__) , (a__)) : List[Any] = extended_euclid(lowerCAmelCase__ , a % b )
a__ : str = a // b
return (y, x - k * y)
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
((a__) , (a__)) : Tuple = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[str] = na * na
a__ : Union[str, Any] = ra * x * na + ra * y * na
return (n % m + m) % m
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
((a__) , (a__)) : Optional[Any] = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ )
if b < 0:
a__ : Optional[int] = (b % n + n) % n
return b
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
a__ , a__ : List[Any] = invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ ), invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Dict = na * na
a__ : Any = ra * x * na + ra * y * na
return (n % m + m) % m
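# Worked example (added for illustration): find x with x % 5 == 1 and x % 7 == 2.
# extended_euclid(5, 7) == (3, -2) is a Bezout pair (5*3 + 7*(-2) == 1), so
# chinese_remainder_theorem(5, 1, 7, 2) == (2*3*5 + 1*(-2)*7) % 35 == 16,
# and indeed 16 % 5 == 1 and 16 % 7 == 2.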
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True) | 642 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__( self , value):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    def __init__( self , tree):
        self.tree = tree
    def depth_first_search( self , node):
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
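def _demo_tree_sum() -> int:
    # Illustrative example (added, not in the original file): sum the tree 10 / (5, -3).
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    return next(iter(BinaryTreeNodeSum(root)))  # 10 + 5 - 3 == 12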
if __name__ == "__main__":
import doctest
doctest.testmod() | 704 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint , config ) -> dict:
    """simple docstring"""
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]
        if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                f'encoder.down.{i}.downsample.conv.weight' )
            new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                f'encoder.down.{i}.downsample.conv.bias' )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f'down.{i}.block', "new": f'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f'mid.block_{i}', "new": f'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
        ]
        if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                f'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                f'decoder.up.{block_id}.upsample.conv.bias'
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f'up.{block_id}.block', "new": f'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f'mid.block_{i}', "new": f'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
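# Recap of the key renames performed above (added for orientation, not exhaustive):
#   encoder.down.{i}.block.*            -> encoder.down_blocks.{i}.resnets.*
#   encoder.down.{i}.downsample.conv.*  -> encoder.down_blocks.{i}.downsamplers.0.conv.*
#   mid.block_{i}.*                     -> mid_block.resnets.{i - 1}.*
#   mid.attn_1.*                        -> mid_block.attentions.0.* (attention convs to linear)
#   decoder.up.{block_id}.block.*       -> decoder.up_blocks.{num_up_blocks - 1 - block_id}.resnets.*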
def vae_pt_to_vae_diffuser(checkpoint_path , output_path , ) -> None:
    """simple docstring"""
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
snake_case : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path) | 182 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
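# Quick check (added for illustration): betas_for_alpha_bar(1000) returns a float32 tensor of
# shape (1000,) whose entries grow toward the max_beta=0.999 cap as t approaches 1.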
class UnCLIPScheduler(SchedulerMixin , ConfigMixin ):
@register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , variance_type: str = "fixed_small_log" , clip_sample: bool = True , clip_sample_range: Optional[float] = 1.0 , prediction_type: str = "epsilon" , beta_schedule: str = "squaredcos_cap_v2" , ):
        '''simple docstring'''
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ):
        '''simple docstring'''
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
        '''simple docstring'''
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1E-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , prev_timestep: Optional[int] = None , generator=None , return_dict: bool = True , ):
        '''simple docstring'''
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                """ for the UnCLIPScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                # _get_variance already returns the standard deviation in this mode
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    """ for the UnCLIPScheduler.""" )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.IntTensor , ):
        '''simple docstring'''
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
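# Minimal sampling sketch (added for illustration, not part of the file):
# assumes some trained denoiser `model` that predicts epsilon; the shapes and
# step count below are arbitrary.
#
#   scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
#   scheduler.set_timesteps(25)
#   sample = torch.randn(1, 3, 64, 64)   # start from pure Gaussian noise
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)  # hypothetical denoiser call
#       sample = scheduler.step(model_output, t, sample).prev_sample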
| 21 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xglm"""] = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xglm_fast"""] = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xglm"""] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
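# Usage note (added): with the lazy structure above, `from transformers import
# XGLMConfig` resolves the submodule only on first attribute access; backends
# that are not installed simply leave their symbols out of `_import_structure`,
# so importing the package never forces torch/TF/Flax to load.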
| 384 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class a_ ( unittest.TestCase ):
    def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' , file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str(Path('..' ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_files(self ):
        transformers_directory = Path('src/transformers' )
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory , identifier=files , ignore_files=ignore_files )
    def test_tokenization_files(self ):
        transformers_directory = Path('src/transformers' )
        files = 'tokenization'
        self.analyze_directory(transformers_directory , identifier=files )
    def test_configuration_files(self ):
        transformers_directory = Path('src/transformers' )
        files = 'configuration'
        self.analyze_directory(transformers_directory , identifier=files )
    def test_files(self ):
        transformers_directory = Path('src/transformers' )
        files = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory , n_identifier=files )
    def test_documentation(self ):
        doc_source_directory = Path('docs/source' )
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory , ignore_files=ignore_files , only_modules=False )
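# Added reference (hedged): outside this harness a single documentation file can
# be checked the same way, e.g.
#   result = doctest.testfile("docs/source/quicktour.md", module_relative=False,
#                             optionflags=doctest.ELLIPSIS)
#   assert result.failed == 0
# where the path is hypothetical.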
| 704 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path ) -> Dict:
    """simple docstring"""
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
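# Illustrative input format (added; the file name is hypothetical). Each line of
# the TSP data file is "node_a node_b distance", so a file containing
#   a b 20
#   a c 18
#   b c 10
# yields {'a': [['b', '20'], ['c', '18']], 'b': [['a', '20'], ['c', '10']], ...}.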
def generate_first_solution(path , dict_of_neighbours ):
    """simple docstring"""
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution , dict_of_neighbours ):
    """simple docstring"""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    """simple docstring"""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None ):
    """simple docstring"""
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
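# Illustrative run (added; the data file name is hypothetical):
#   python tabu_search.py -f tsp_data.txt -i 100 -s 5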
| 347 | 0 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace ):
    return TrainCommand(args )
class TrainCommand(BaseTransformersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data' , type=str , required=True , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
        train_parser.add_argument(
            '--column_label' , type=int , default=0 , help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text' , type=int , default=1 , help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id' , type=int , default=2 , help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data' , type=str , default='' , help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split' , type=float , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
        train_parser.add_argument('--output' , type=str , default='./' , help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task' , type=str , default='text_classification' , help='Task to train the model on.' )
        train_parser.add_argument(
            '--model' , type=str , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size' , type=int , default=32 , help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size' , type=int , default=64 , help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate' , type=float , default=3E-5 , help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon' , type=float , default=1E-08 , help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args ):
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        raise NotImplementedError
    def run_tf( self ):
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
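# Illustrative CLI call (added; the csv path is hypothetical, the flags are the
# ones registered in register_subcommand above):
#   transformers-cli train --train_data ./train.csv --task text_classification \
#       --model bert-base-uncased --output ./trained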
| 594 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def A__ ( self : Union[str, Any] ):
A__ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A__ = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A__ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def A__ ( self : Dict ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(_lowerCamelCase , return_tensors='''np''' )
A__ = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A__ ( self : Optional[int] ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A__ = '''lower newer'''
A__ = processor(text=_lowerCamelCase )
A__ = tokenizer(_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A__ ( self : Any ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A__ = '''lower newer'''
A__ = self.prepare_image_inputs()
A__ = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def A__ ( self : Optional[int] ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(_lowerCamelCase )
A__ = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def A__ ( self : Optional[Any] ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A__ = '''lower newer'''
A__ = self.prepare_image_inputs()
A__ = processor(text=_lowerCamelCase , images=_lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
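# Minimal end-to-end sketch of the processor under test (added; the checkpoint
# name is an assumption, the call pattern mirrors the tests above):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(text="lower newer", images=image, return_tensors="pt")
#   # -> dict with "pixel_values", "input_ids", "attention_mask"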
| 571 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum ):
    """simple docstring"""
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("""gpt2""" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self , sequences ):
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs , char_scores = self._decode_helper(char_preds , """char""" )
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds , """bpe""" )
        wp_strs , wp_scores = self._decode_helper(wp_preds , """wp""" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["""generated_text"""] = final_strs
        out["""scores"""] = final_scores
        out["""char_preds"""] = char_strs
        out["""bpe_preds"""] = bpe_strs
        out["""wp_preds"""] = wp_strs
        return out
    def _decode_helper( self , pred_logits , format ):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = """[s]"""
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = """#"""
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = """[SEP]"""
        else:
            raise ValueError(F'Format {format} is not supported.' )
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _ , preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self , sequences ):
        decode_strs = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self , sequences ):
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self , sequences ):
        decode_strs = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
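# Usage sketch (added; tensor shapes are illustrative): `sequences` is the tuple
# of per-head logits (char, bpe, wp), each of shape (batch, seq_len, vocab_size).
#   out = processor.batch_decode((char_logits, bpe_logits, wp_logits))
#   out["generated_text"]  # best string per sample across the three heads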
| 706 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest ):
"""simple docstring"""
lowercase = (EulerDiscreteScheduler,)
lowercase = 10
def __lowercase ( self : Any , **lowerCamelCase : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase_ : int = {
"""num_train_timesteps""": 11_00,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowerCamelCase )
return config
def __lowercase ( self : Tuple ) -> Tuple:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def __lowercase ( self : Optional[Any] ) -> str:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def __lowercase ( self : Union[str, Any] ) -> Dict:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def __lowercase ( self : Union[str, Any] ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def __lowercase ( self : Optional[int] ) -> Dict:
lowerCAmelCase_ : List[str] = self.scheduler_classes[0]
lowerCAmelCase_ : int = self.get_scheduler_config()
lowerCAmelCase_ : Union[str, Any] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase_ : int = torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = self.dummy_model()
lowerCAmelCase_ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase_ : int = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : str = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = output.prev_sample
lowerCAmelCase_ : Optional[int] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCAmelCase_ : Any = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def __lowercase ( self : Any ) -> List[str]:
lowerCAmelCase_ : int = self.scheduler_classes[0]
lowerCAmelCase_ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCAmelCase_ : Optional[Any] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = self.dummy_model()
lowerCAmelCase_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase_ : Optional[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Tuple = model(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
lowerCAmelCase_ : Any = output.prev_sample
lowerCAmelCase_ : str = torch.sum(torch.abs(lowerCamelCase ) )
lowerCAmelCase_ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3
def __lowercase ( self : Optional[int] ) -> List[Any]:
lowerCAmelCase_ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase_ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase_ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
lowerCAmelCase_ : Tuple = torch.manual_seed(0 )
lowerCAmelCase_ : Dict = self.dummy_model()
lowerCAmelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCAmelCase_ : List[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
lowerCAmelCase_ : Optional[int] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : str = model(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : List[Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
lowerCAmelCase_ : Optional[int] = output.prev_sample
lowerCAmelCase_ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
lowerCAmelCase_ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def __lowercase ( self : int ) -> int:
lowerCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase_ : Union[str, Any] = self.get_scheduler_config()
lowerCAmelCase_ : Optional[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
lowerCAmelCase_ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = self.dummy_model()
lowerCAmelCase_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCAmelCase_ : List[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
lowerCAmelCase_ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : str = model(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
lowerCAmelCase_ : Dict = output.prev_sample
lowerCAmelCase_ : Tuple = torch.sum(torch.abs(lowerCamelCase ) )
lowerCAmelCase_ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
| 398 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = CpmAntTokenizer
SCREAMING_SNAKE_CASE : Dict = False
    def setUp(self ):
        super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization(self ):
        tokenizer = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
        texts = '''今天天气真好!'''
        jieba_tokens = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens ,jieba_tokens )
        normalized_text = '''今天天气真好!'''
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text ,normalized_text )
| 41 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]=13 , lowerCamelCase : Dict=32 , lowerCamelCase : Optional[int]=3 , lowerCamelCase : List[Any]=4 , lowerCamelCase : Tuple=[10, 20, 30, 40] , lowerCamelCase : str=[2, 2, 3, 2] , lowerCamelCase : Optional[int]=True , lowerCamelCase : List[Any]=True , lowerCamelCase : Union[str, Any]=37 , lowerCamelCase : Any="gelu" , lowerCamelCase : Tuple=10 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"] , lowerCamelCase : Any=[2, 3, 4] , lowerCamelCase : Tuple=None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_stages
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = initializer_range
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
_UpperCAmelCase = scope
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ) -> str:
"""simple docstring"""
_UpperCAmelCase = ConvNextModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_UpperCAmelCase = model(lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase = ConvNextForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_UpperCAmelCase = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = ConvNextBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_UpperCAmelCase = model(lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCAmelCase = None
_UpperCAmelCase = ConvNextBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_UpperCAmelCase = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase ( self : int ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def lowerCamelCase ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def lowerCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(lowerCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def lowerCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
def lowerCamelCase ( self : int ) -> Any:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ):
_UpperCAmelCase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowerCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = ConvNextModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(lowerCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**lowerCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
_UpperCAmelCase = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
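# Added note (hedged): the expected values above are the first three of the 1000
# ImageNet class logits; a quick sanity check of the top-1 prediction would be
#   predicted_label = outputs.logits.argmax(-1).item()
#   model.config.id2label[predicted_label]  # e.g. a cat-related class here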
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else ()
_lowerCamelCase = ConvNextConfig
_lowerCamelCase = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self )
| 108 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir( files , tmp_path_factory ):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload( dataset_info , tmp_path ):
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , 'dataset_info.json' ) )
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
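# Added example (hedged, my reading of the first test above): the README front
# matter written there,
#   ---
#   dataset_info:
#     dataset_size: 42
#   ---
# is exactly what DatasetInfosDict.from_directory() parses back into the
# `dataset_size == 42` assertion.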
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload( dataset_infos_dict , tmp_path ):
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , 'README.md' ) )
| 710 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset ):
    '''simple docstring'''
    def __init__( self , length = 101 ):
        self.length = length
    def __len__( self ):
        return self.length
    def __getitem__( self , i ):
        return i
class DummyDataCollator:
    '''simple docstring'''
    def __call__( self , features ):
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}
class DummyModel(nn.Module ):
    '''simple docstring'''
    def __init__( self ):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120 , 80 )
    def forward( self , input_ids , labels=None ):
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus ):
    '''simple docstring'''
    @require_torch_neuroncore
    def test_trainer(self ):
        distributed_args = F'''--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'''--output_dir {output_dir}'''.split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus ):
    '''simple docstring'''
    @require_torch_multi_gpu
    def test_trainer(self ):
        distributed_args = F'''--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'''--output_dir {output_dir}'''.split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None | 492 | 0 |
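# Hedged single-process analogue of the check above (not from the source): distributed
# evaluation must hand back every sample exactly once and in the original order.
def _check_sequential(predictions: list, label_ids: list, length: int) -> bool:
    sequential = list(range(length))
    return predictions == sequential and label_ids == sequential


assert _check_sequential(list(range(5)), list(range(5)), 5)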
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
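    # Hedged usage sketch (script name and paths are placeholders/assumptions, not from
    # the source):
    #
    #   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /tmp/xlnet/model.ckpt \
    #       --xlnet_config_file /tmp/xlnet/config.json \
    #       --pytorch_dump_folder_path /tmp/xlnet-pytorch \
    #       --finetuning_task sts-b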
| 38 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
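# Hedged illustration (not from the source) of how the renaming rules above map a raw
# RWKV key to its Hugging Face counterpart, assuming the same checkpoint conventions:
#
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key"
#   "head.weight"             -> "head.weight"  (the LM head keeps its name)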
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
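# Hedged note on step 4 (not from the source): `shard_checkpoint` splits a state dict
# into pieces below a size budget (10GB by default) and returns
# ({shard_file_name: shard_state_dict}, index_or_None); the index is None when the
# whole state dict fits in a single shard, which is why the index write is guarded.
#
#   shards, index = shard_checkpoint({"w": torch.zeros(2)})
#   assert index is None  # a tiny state dict fits in one shard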
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 429 | 0 |
"""simple docstring"""
class a :
def __init__( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = name
_UpperCAmelCase = value
_UpperCAmelCase = weight
def __repr__( self : List[str] ):
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def lowerCAmelCase_ ( self : Tuple ):
return self.value
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.name
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.weight
def lowerCAmelCase_ ( self : List[str] ):
return self.value / self.weight
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
for i in range(len(lowercase ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sorted(lowercase ,key=lowercase ,reverse=lowercase )
_UpperCAmelCase = []
_UpperCAmelCase , _UpperCAmelCase = 0.0, 0.0
for i in range(len(lowercase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
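# Hedged usage sketch (values are illustrative, not from the source): pick the
# highest-value items first while the weight budget allows.
if __name__ == "__main__":
    menu = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 10, 20])
    chosen, total_value = greedy(menu, 50, Things.get_value)
    print(chosen, total_value)  # Pizza and Burger fit the budget; total value 180.0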
| 715 | """simple docstring"""
import datasets
UpperCAmelCase__ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCAmelCase__ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCAmelCase__ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
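# Hedged usage sketch (mirrors the docstring example above; `load_metric` was the
# metric-loading entry point for this era of `datasets`):
#
#   xnli_metric = datasets.load_metric("xnli")
#   print(xnli_metric.compute(predictions=[0, 1], references=[0, 1]))  # {'accuracy': 1.0}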
| 275 | 0 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
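# Hedged illustration (not from the source): with keyword "KEYWORD" the map starts
#   A->K, B->E, C->Y, D->W, E->O, F->R, G->D
# and the remaining plaintext letters map to unused alphabet letters in order, e.g.
#
#   cipher_map = create_cipher_map("KEYWORD")
#   secret = encipher("HELLO WORLD", cipher_map)
#   decipher(secret, cipher_map)  # should round-trip back to "HELLO WORLD"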
| 305 | import os
def solution(filename: str = "input.txt") -> int:
    """
    Find the minimal path sum from the left column to the right column of the matrix
    read from `filename`, moving up, down, and right.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
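# Hedged worked example (not from the source): for the 3x3 matrix
#   1 9 1
#   9 1 9
#   1 1 1
# the minimal left-to-right path sum is 3, along the bottom row.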
| 305 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 718 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
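# Hedged usage sketch (defaults follow the signature above; not from the source):
#
#   config = ViTMAEConfig()
#   assert config.mask_ratio == 0.75 and config.decoder_hidden_size == 512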
| 239 | 0 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {"vocab_file": "spiece.model"}
__A : str = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__A : Dict = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class __snake_case ( PreTrainedTokenizer ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : Tuple , lowerCamelCase : str , lowerCamelCase : List[Any]=False , lowerCamelCase : str=False , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : int=None , lowerCamelCase : Tuple=None , lowerCamelCase : Dict=None , lowerCamelCase : Any=None , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : Dict , ) -> None:
lowerCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase_ : Optional[Any] = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowerCAmelCase_ : Optional[Any] = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase_ : Dict = """<|endoftext|>""" if eos_token is None else eos_token
lowerCAmelCase_ : str = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase_ : List[str] = unk_token if pad_token is None else pad_token
lowerCAmelCase_ : Optional[Any] = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase_ : Optional[Any] = """<pad>""" if pad_token is None else pad_token
lowerCAmelCase_ : int = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCamelCase , remove_space=lowerCamelCase , keep_accents=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
lowerCAmelCase_ : Any = do_lower_case
lowerCAmelCase_ : str = remove_space
lowerCAmelCase_ : Union[str, Any] = keep_accents
lowerCAmelCase_ : int = vocab_file
lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# Used for whitespace normalization in input texts
        # fmt: off
lowerCAmelCase_ : List[Any] = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase_ : Dict = re.compile(
F'[{"".join(map(lowerCamelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]' )
def __getstate__( self : Any ) -> List[str]:
lowerCAmelCase_ : Tuple = self.__dict__.copy()
lowerCAmelCase_ : int = None
return state
def __setstate__( self : List[str] , lowerCamelCase : str ) -> Dict:
lowerCAmelCase_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowercase ( self : List[str] ) -> int:
return len(self.sp_model )
def __lowercase ( self : Optional[Any] , lowerCamelCase : str ) -> str:
lowerCAmelCase_ : Optional[int] = self.non_printing_characters_re.sub("""""" , lowerCamelCase )
# Normalize whitespaces
lowerCAmelCase_ : Optional[int] = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowerCAmelCase_ : Optional[int] = unicodedata.normalize("""NFC""" , lowerCamelCase )
return text
def __lowercase ( self : List[Any] , lowerCamelCase : str , **lowerCamelCase : Any ) -> List[str]:
lowerCAmelCase_ : Tuple = self.preprocess_text(lowerCamelCase )
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def __lowercase ( self : List[Any] , lowerCamelCase : str ) -> int:
return self.sp_model.PieceToId(lowerCamelCase )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : int ) -> str:
return self.sp_model.IdToPiece(lowerCamelCase )
@staticmethod
def __lowercase ( lowerCamelCase : str ) -> str:
return out_string
def __lowercase ( self : Optional[int] , lowerCamelCase : List[str] ) -> str:
lowerCAmelCase_ : str = []
lowerCAmelCase_ : int = """"""
lowerCAmelCase_ : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase ) + token
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : Dict = []
else:
current_sub_tokens.append(lowerCamelCase )
lowerCAmelCase_ : str = False
out_string += self.sp_model.decode(lowerCamelCase )
return out_string
def __lowercase ( self : int ) -> Dict[str, int]:
lowerCAmelCase_ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : str , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase_ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , """wb""" ) as fi:
lowerCAmelCase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def __lowercase ( self : Optional[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase_ : Optional[Any] = self.preprocess_text(lowerCamelCase )
lowerCAmelCase_ : Tuple = self.sp_model.encode(lowerCamelCase )
else:
lowerCAmelCase_ : Tuple = [self.preprocess_text(lowerCamelCase ) for t in text]
lowerCAmelCase_ : Tuple = self.sp_model.encode(lowerCamelCase )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase )
return token_ids
def __lowercase ( self : List[str] , lowerCamelCase : Union[int, List[int]] ) -> str:
return self.sp_model.decode(lowerCamelCase )
def __lowercase ( self : List[Any] , lowerCamelCase : "Conversation" ) -> List[int]:
lowerCAmelCase_ : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
lowerCAmelCase_ : Tuple = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(lowerCamelCase ) + F'{self.bos_token}Bot:'
)
return self.encode(text=lowerCamelCase )
| 275 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic heap supporting updates and deletions, with an optional key function."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return tuple(self.arr[0]) if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


if __name__ == "__main__":
    import doctest

    doctest.testmod()
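# Hedged usage sketch (not from the source): with the default key this behaves as a
# max-heap on the item value.
if __name__ == "__main__":
    heap = Heap()
    heap.insert_item(1, 10)
    heap.insert_item(2, 30)
    heap.insert_item(3, 20)
    assert heap.get_top() == (2, 30)
    heap.update_item(2, 5)
    assert heap.extract_top() == (3, 20)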
| 275 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f"""{solution() = }""")
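# Hedged worked example (standard arithmetic, not from the source):
if __name__ == "__main__":
    assert lcm(4, 6) == 12
    assert solution(10) == 2520  # for the default n=20 the result is 232792560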
| 375 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
"""simple docstring"""
def __init__( self : str , snake_case : Any , snake_case : str=14 , snake_case : Dict=7 , snake_case : Any=True , snake_case : Any=True , snake_case : str=True , snake_case : List[str]=True , snake_case : int=True , snake_case : List[Any]=99 , snake_case : Optional[int]=32 , snake_case : str=5 , snake_case : int=4 , snake_case : str=37 , snake_case : Union[str, Any]="gelu" , snake_case : List[str]=0.1 , snake_case : Optional[int]=0.1 , snake_case : Tuple=512 , snake_case : int=16 , snake_case : Any=2 , snake_case : List[str]=0.02 , snake_case : List[Any]=3 , snake_case : str=4 , snake_case : Tuple=None , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = use_mc_token_ids
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
__UpperCamelCase = self.vocab_size - 1
def snake_case ( self : Optional[Any] ):
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase = None
if self.use_mc_token_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = self.get_config()
__UpperCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case ( self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def snake_case ( self : Any , snake_case : Optional[int] , snake_case : Any , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[Any] , *snake_case : Union[str, Any] ):
__UpperCamelCase = CTRLModel(config=snake_case )
model.to(snake_case )
model.eval()
model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
model(snake_case , token_type_ids=snake_case )
__UpperCamelCase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def snake_case ( self : Any , snake_case : Tuple , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] , *snake_case : Union[str, Any] ):
__UpperCamelCase = CTRLLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
def snake_case ( self : List[Any] , snake_case : int , snake_case : Dict , snake_case : Optional[Any] , snake_case : Any , *snake_case : str ):
__UpperCamelCase = self.num_labels
__UpperCamelCase = CTRLForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowerCAmelCase__ : int = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase__ : List[str] = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCAmelCase__ : str = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : str = False
def snake_case ( self : str , snake_case : Optional[int] , snake_case : List[str] , snake_case : Tuple , snake_case : Dict , snake_case : List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def snake_case ( self : Optional[int] ):
__UpperCamelCase = CTRLModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=snake_case , n_embd=37 )
def snake_case ( self : Union[str, Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Dict ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[int] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*snake_case )
def snake_case ( self : Tuple ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : List[Any] ):
pass
@slow
def snake_case ( self : Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = CTRLModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case ( self : List[Any] ):
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def snake_case ( self : Optional[int] ):
__UpperCamelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(snake_case )
__UpperCamelCase = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=snake_case ) # Legal the president is
__UpperCamelCase = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__UpperCamelCase = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].tolist() , snake_case )
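        # Hedged note (not from the source): with do_sample=False, generate() is greedy
        # decoding, so the continuation above is deterministic for the fixed `ctrl` checkpoint.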
| 375 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """sentencepiece.bpe.model"""}
__SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
__SCREAMING_SNAKE_CASE = {
"""moussaKam/mbarthez""": 10_24,
"""moussaKam/barthez""": 10_24,
"""moussaKam/barthez-orangesum-title""": 10_24,
}
__SCREAMING_SNAKE_CASE = """▁"""
class __snake_case ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCAmelCase_ : Dict = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[str] = ['input_ids', 'attention_mask']
def __init__( self :Any , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :Tuple="<s>" , UpperCamelCase__ :int="</s>" , UpperCamelCase__ :Optional[Any]="</s>" , UpperCamelCase__ :List[str]="<s>" , UpperCamelCase__ :Union[str, Any]="<unk>" , UpperCamelCase__ :Union[str, Any]="<pad>" , UpperCamelCase__ :List[Any]="<mask>" , UpperCamelCase__ :Optional[Dict[str, Any]] = None , **UpperCamelCase__ :Optional[int] , ):
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
_a = vocab_file
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
_a = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_a = len(self.sp_model ) - 1
_a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , UpperCamelCase__ :str ):
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , UpperCamelCase__ :Optional[int] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a = self.sp_model.PieceToId(UpperCamelCase__ )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , UpperCamelCase__ :List[str] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :int , UpperCamelCase__ :Any ):
_a = []
_a = ""
_a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
_a = True
_a = []
else:
current_sub_tokens.append(UpperCamelCase__ )
_a = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def __getstate__( self :Optional[Any] ):
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self :Dict , UpperCamelCase__ :List[Any] ):
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self :Dict , UpperCamelCase__ :str , UpperCamelCase__ :Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
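# Hedged usage sketch for the tokenizer above (the public class in transformers is
# BarthezTokenizer; treating this one as such is an assumption, not from the source):
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer.encode("Bonjour", add_special_tokens=False)
#   tokenizer.build_inputs_with_special_tokens(ids)  # wraps ids as <s> ... </s>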
| 388 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__SCREAMING_SNAKE_CASE = False
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
return 12
@property
def SCREAMING_SNAKE_CASE_ ( self :int ):
return 12
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
torch.manual_seed(0 )
_a = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCamelCase__ )
@property
def SCREAMING_SNAKE_CASE_ ( self :str ):
torch.manual_seed(0 )
_a = 12
_a = 12
_a = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
_a = TransformeraDModel(**UpperCamelCase__ )
return model
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase__ )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
_a = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
_a = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=UpperCamelCase__ , output_type="np" , )
_a = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 388 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = size if size is not None else {"height": 18, "width": 18}
_SCREAMING_SNAKE_CASE : Optional[int] = parent
_SCREAMING_SNAKE_CASE : Any = batch_size
_SCREAMING_SNAKE_CASE : List[str] = num_channels
_SCREAMING_SNAKE_CASE : List[str] = image_size
_SCREAMING_SNAKE_CASE : Any = min_resolution
_SCREAMING_SNAKE_CASE : Tuple = max_resolution
_SCREAMING_SNAKE_CASE : List[Any] = do_resize
_SCREAMING_SNAKE_CASE : Optional[int] = size
_SCREAMING_SNAKE_CASE : List[str] = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
A__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "apply_ocr" ) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , snake_case__ )
self.assertIsInstance(encoding.boxes , snake_case__ )
# Test batched
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Optional[int] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Tuple = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = LayoutLMvaImageProcessor()
from datasets import load_dataset
_SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
_SCREAMING_SNAKE_CASE : Dict = Image.open(ds[0]["file"] ).convert("RGB" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(snake_case__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_SCREAMING_SNAKE_CASE : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
_SCREAMING_SNAKE_CASE : Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case__ )
self.assertListEqual(encoding.boxes , snake_case__ )
# with apply_OCR = False
_SCREAMING_SNAKE_CASE : int = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
_SCREAMING_SNAKE_CASE : int = image_processing(snake_case__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 713 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> Optional[int]:
def decorator(lowerCamelCase__ : int ):
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(lowerCamelCase__, "handle_key", [] )
handle += [key]
setattr(lowerCamelCase__, "handle_key", lowerCamelCase__ )
return func
return decorator
def _lowerCAmelCase ( *lowerCamelCase__ : List[str] ) -> Tuple:
def decorator(lowerCamelCase__ : Dict ):
_SCREAMING_SNAKE_CASE : List[Any] = getattr(lowerCamelCase__, "handle_key", [] )
handle += keys
setattr(lowerCamelCase__, "handle_key", lowerCamelCase__ )
return func
return decorator
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __new__( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = super().__new__(cls , snake_case__ , snake_case__ , snake_case__ )
if not hasattr(snake_case__ , "key_handler" ):
setattr(snake_case__ , "key_handler" , {} )
setattr(snake_case__ , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(snake_case__ , "handle_key" , [] )
for key in handled_keys:
_SCREAMING_SNAKE_CASE : Tuple = value
return new_cls
@staticmethod
def __SCREAMING_SNAKE_CASE ( cls ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = get_character()
if char != KEYMAP["undefined"]:
_SCREAMING_SNAKE_CASE : Dict = ord(snake_case__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = cls.key_handler.get(snake_case__ )
if handler:
_SCREAMING_SNAKE_CASE : Optional[int] = char
return handler(cls )
else:
return None
def _lowerCAmelCase ( cls : List[Any] ) -> str:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 295 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def lowerCamelCase__ ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(snake_case_ , snake_case_ ) ) )
def lowerCamelCase__ ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
__snake_case = (
'''Wrong input data\'s dimensions... '''
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(snake_case_ )
try:
if dataset.shape[1] != value_array.shape[1]:
__snake_case = (
'''Wrong input data\'s shape... '''
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(snake_case_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
__snake_case = (
'''Input data have different datatype... '''
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(snake_case_ )
__snake_case = []
for value in value_array:
__snake_case = euclidean(snake_case_ , dataset[0] )
__snake_case = dataset[0].tolist()
for dataset_value in dataset[1:]:
__snake_case = euclidean(snake_case_ , snake_case_ )
if dist > temp_dist:
__snake_case = temp_dist
__snake_case = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def lowerCamelCase__ ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ) -> float:
return np.dot(snake_case_ , snake_case_ ) / (norm(snake_case_ ) * norm(snake_case_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
snake_case_ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
snake_case_ = concatenate_datasets
snake_case_ = DownloadConfig
snake_case_ = DownloadManager
snake_case_ = DownloadMode
snake_case_ = DownloadConfig
snake_case_ = DownloadMode
snake_case_ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 592 | 1 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 ):
UpperCAmelCase_ = end or len(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
UpperCAmelCase_ = array[temp_index - 1]
temp_index -= 1
UpperCAmelCase_ = temp_index_value
return array
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): # Max Heap
UpperCAmelCase_ = index
UpperCAmelCase_ = 2 * index + 1 # Left Node
UpperCAmelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
UpperCAmelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
UpperCAmelCase_ = right_index
if largest != index:
UpperCAmelCase_ , UpperCAmelCase_ = array[largest], array[index]
heapify(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = len(lowerCAmelCase__ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for i in range(n - 1 , 0 , -1 ):
UpperCAmelCase_ , UpperCAmelCase_ = array[0], array[i]
heapify(lowerCAmelCase__ , 0 , lowerCAmelCase__ )
return array
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = low
UpperCAmelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
UpperCAmelCase_ , UpperCAmelCase_ = array[j], array[i]
i += 1
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) == 0:
return array
UpperCAmelCase_ = 2 * math.ceil(math.loga(len(lowerCAmelCase__ ) ) )
UpperCAmelCase_ = 16
return intro_sort(lowerCAmelCase__ , 0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCAmelCase__ )
max_depth -= 1
UpperCAmelCase_ = median_of_a(lowerCAmelCase__ , lowerCAmelCase__ , start + ((end - start) // 2) + 1 , end - 1 )
UpperCAmelCase_ = partition(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
intro_sort(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = p
return insertion_sort(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase = input("""Enter numbers separated by a comma : """).strip()
lowerCamelCase = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 709 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit_text_model'''
def __init__( self : List[Any] , _UpperCAmelCase : str=49408 , _UpperCAmelCase : str=512 , _UpperCAmelCase : Optional[Any]=2048 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=8 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : List[str]="quick_gelu" , _UpperCAmelCase : Dict=1e-5 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[int]=1.0 , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : Dict=49406 , _UpperCAmelCase : Union[str, Any]=49407 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit_vision_model'''
def __init__( self : str , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : Optional[Any]=3072 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Dict="quick_gelu" , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=1.0 , **_UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
@classmethod
def lowercase__ ( cls : Any , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''owlvit'''
UpperCamelCase = True
def __init__( self : Tuple , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Any=2.6592 , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if text_config is None:
UpperCAmelCase_ = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
UpperCAmelCase_ = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
UpperCAmelCase_ = OwlViTTextConfig(**_UpperCAmelCase )
UpperCAmelCase_ = OwlViTVisionConfig(**_UpperCAmelCase )
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = logit_scale_init_value
UpperCAmelCase_ = return_dict
UpperCAmelCase_ = 1.0
@classmethod
def lowercase__ ( cls : Dict , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Tuple ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowercase__ ( cls : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = text_config
UpperCAmelCase_ = vision_config
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 1e-4
def lowercase__ ( self : List[str] , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , framework=_UpperCAmelCase )
UpperCAmelCase_ = super().generate_dummy_inputs(
processor.image_processor , batch_size=_UpperCAmelCase , framework=_UpperCAmelCase )
return {**text_input_dict, **image_input_dict}
@property
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
return 14
| 14 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_lowerCAmelCase = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def _snake_case ( __snake_case , __snake_case ):
inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCamelCase = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def _snake_case ( __snake_case , __snake_case ):
inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCamelCase = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase = expected_configs[0]
assert expected_config in infos
_UpperCamelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert expected_config in infos
_UpperCamelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
| 10 | """simple docstring"""
UpperCamelCase = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 473 | 0 |
'''simple docstring'''
def __lowerCAmelCase ( a_ ) -> bool:
'''simple docstring'''
if not isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE : str = f"""Input value of [number={number}] must be an integer"""
raise TypeError(a_ )
if number < 0:
return False
SCREAMING_SNAKE_CASE : Tuple = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 179 | '''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCAmelCase ( a_ ) -> List[Any]:
'''simple docstring'''
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , a_ , )
if isinstance(a_ , torch.Tensor ):
return image
elif isinstance(a_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : str = [image]
if isinstance(image[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image[0].size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE : List[Any] = np.concatenate(a_ , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = np.array(a_ ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE : Tuple = image.transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE : Dict = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE : int = torch.from_numpy(a_ )
elif isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = torch.cat(a_ , dim=0 )
return image
def __lowerCAmelCase ( a_ ) -> List[Any]:
'''simple docstring'''
if isinstance(a_ , torch.Tensor ):
return mask
elif isinstance(a_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Optional[int] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = mask[0].size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE : Tuple = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE : Union[str, Any] = np.concatenate(a_ , axis=0 )
SCREAMING_SNAKE_CASE : List[Any] = mask.astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : str = torch.from_numpy(a_ )
elif isinstance(mask[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(a_ , dim=0 )
return mask
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : UNetaDModel
snake_case__ : RePaintScheduler
def __init__( self , lowercase__ , lowercase__ ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=lowercase__ , scheduler=lowercase__ )
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ , lowercase__ = 250 , lowercase__ = 0.0 , lowercase__ = 10 , lowercase__ = 10 , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
SCREAMING_SNAKE_CASE : Optional[int] = image
SCREAMING_SNAKE_CASE : List[str] = _preprocess_image(lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE : Any = _preprocess_mask(lowercase__ )
SCREAMING_SNAKE_CASE : str = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE : int = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowercase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
SCREAMING_SNAKE_CASE : Dict = original_image.shape
SCREAMING_SNAKE_CASE : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowercase__ , lowercase__ , lowercase__ , self.device )
SCREAMING_SNAKE_CASE : Optional[Any] = eta
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE : Tuple = generator[0] if isinstance(lowercase__ , lowercase__ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE : Optional[int] = self.unet(lowercase__ , lowercase__ ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE : List[str] = self.scheduler.undo_step(lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : int = t
SCREAMING_SNAKE_CASE : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Dict = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
| 179 | 1 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase_ : int = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Dict = 256
def UpperCAmelCase_ ( A ):
'''simple docstring'''
if len(A ) < MIN_NUM_TOKENS:
return None
_a : Union[str, Any] = MinHash(num_perm=A )
for token in set(A ):
min_hash.update(token.encode() )
return min_hash
def UpperCAmelCase_ ( A ):
'''simple docstring'''
return {t for t in NON_ALPHA.split(A ) if len(t.strip() ) > 0}
class a :
'''simple docstring'''
def __init__( self , *,
lowerCamelCase_ = 0.85 , ) -> List[Any]:
_a : int = duplication_jaccard_threshold
_a : Dict = NUM_PERM
_a : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_a : Optional[Any] = defaultdict(lowerCamelCase_ )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
_a : List[Any] = self._index.query(lowerCamelCase_ )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCamelCase_ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCamelCase_ )
def __UpperCamelCase ( self ) -> List[List[Dict]]:
_a : Union[str, Any] = []
for base, duplicates in self._duplicate_clusters.items():
_a : Union[str, Any] = [base] + list(lowerCamelCase_ )
# reformat the cluster to be a list of dict
_a : List[str] = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(lowerCamelCase_ )
return duplicate_clusters
def __UpperCamelCase ( self , lowerCamelCase_ ) -> None:
_a : Tuple = self.get_duplicate_clusters()
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a , _a : List[Any] = element
_a : Union[str, Any] = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCAmelCase_ ( A ):
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(A , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : int = DuplicationIndex(duplication_jaccard_threshold=A )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(A ) ) , max_queue_size=1_0_0 ) ):
di.add(A , A )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : Optional[int] = get_tokens(A )
_a : Union[str, Any] = get_tokens(A )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
UpperCAmelCase_ : Optional[Any] = None
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : str = []
for elementa in cluster:
_a : Union[str, Any] = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
_a : Optional[Any] = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(A , A ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_a : int = 1
extremes.append(A )
return extremes
def UpperCAmelCase_ ( A , A , A ):
'''simple docstring'''
global _shared_dataset
_a : List[str] = dataset
_a : int = []
_a : Dict = partial(_find_cluster_extremes_shared , jaccard_threshold=A )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
A , A , ) , total=len(A ) , ):
extremes_list.append(A )
return extremes_list
def UpperCAmelCase_ ( A , A = 0.85 ):
'''simple docstring'''
_a : Tuple = make_duplicate_clusters(A , A )
_a : Any = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
_a : str = {}
_a : Dict = find_extremes(A , A , A )
for extremes in extremes_clusters:
for element in extremes:
_a : Dict = element
_a : int = duplicate_indices - set(extreme_dict.keys() )
_a : int = dataset.filter(lambda A , A : idx not in remove_indices , with_indices=A )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_a : Union[str, Any] = element['base_index'] in extreme_dict
if element["is_extreme"]:
_a : Optional[int] = extreme_dict[element['base_index']]['copies']
print(f'''Original dataset size: {len(A )}''' )
print(f'''Number of duplicate clusters: {len(A )}''' )
print(f'''Files in duplicate cluster: {len(A )}''' )
print(f'''Unique files in duplicate cluster: {len(A )}''' )
print(f'''Filtered dataset size: {len(A )}''' )
return ds_filter, duplicate_clusters
| 120 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCAmelCase_ ( A , A , A = 1 / sqrt(2 ) ):
'''simple docstring'''
_a : List[Any] = tau * frequency / samplerate
_a : Tuple = sin(A )
_a : List[Any] = cos(A )
_a : Union[str, Any] = _sin / (2 * q_factor)
_a : Dict = (1 - _cos) / 2
_a : Any = 1 - _cos
_a : Any = 1 + alpha
_a : int = -2 * _cos
_a : str = 1 - alpha
_a : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase_ ( A , A , A = 1 / sqrt(2 ) ):
'''simple docstring'''
_a : int = tau * frequency / samplerate
_a : int = sin(A )
_a : Union[str, Any] = cos(A )
_a : int = _sin / (2 * q_factor)
_a : Dict = (1 + _cos) / 2
_a : int = -1 - _cos
_a : Optional[int] = 1 + alpha
_a : str = -2 * _cos
_a : Dict = 1 - alpha
_a : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase_ ( A , A , A = 1 / sqrt(2 ) ):
'''simple docstring'''
_a : str = tau * frequency / samplerate
_a : Dict = sin(A )
_a : int = cos(A )
_a : Dict = _sin / (2 * q_factor)
_a : List[Any] = _sin / 2
_a : List[str] = 0
_a : Dict = -ba
_a : List[Any] = 1 + alpha
_a : Union[str, Any] = -2 * _cos
_a : List[Any] = 1 - alpha
_a : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase_ ( A , A , A = 1 / sqrt(2 ) ):
'''simple docstring'''
_a : Optional[Any] = tau * frequency / samplerate
_a : Tuple = sin(A )
_a : Tuple = cos(A )
_a : Dict = _sin / (2 * q_factor)
_a : List[Any] = 1 - alpha
_a : int = -2 * _cos
_a : List[Any] = 1 + alpha
_a : str = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCAmelCase_ ( A , A , A , A = 1 / sqrt(2 ) , ):
'''simple docstring'''
_a : Union[str, Any] = tau * frequency / samplerate
_a : str = sin(A )
_a : str = cos(A )
_a : List[Any] = _sin / (2 * q_factor)
_a : Optional[Any] = 1_0 ** (gain_db / 4_0)
_a : Dict = 1 + alpha * big_a
_a : str = -2 * _cos
_a : Tuple = 1 - alpha * big_a
_a : Tuple = 1 + alpha / big_a
_a : str = -2 * _cos
_a : Union[str, Any] = 1 - alpha / big_a
_a : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase_ ( A , A , A , A = 1 / sqrt(2 ) , ):
'''simple docstring'''
_a : Optional[int] = tau * frequency / samplerate
_a : List[str] = sin(A )
_a : Tuple = cos(A )
_a : Union[str, Any] = _sin / (2 * q_factor)
_a : str = 1_0 ** (gain_db / 4_0)
_a : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos
_a : List[str] = (big_a + 1) + (big_a - 1) * _cos
_a : List[Any] = (big_a - 1) - (big_a + 1) * _cos
_a : Dict = (big_a - 1) + (big_a + 1) * _cos
_a : Tuple = 2 * sqrt(A ) * alpha
_a : Any = big_a * (pmc + aaa)
_a : Optional[int] = 2 * big_a * mpc
_a : Dict = big_a * (pmc - aaa)
_a : List[str] = ppmc + aaa
_a : int = -2 * pmpc
_a : Tuple = ppmc - aaa
_a : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCAmelCase_ ( A , A , A , A = 1 / sqrt(2 ) , ):
'''simple docstring'''
_a : Dict = tau * frequency / samplerate
_a : Tuple = sin(A )
_a : Any = cos(A )
_a : int = _sin / (2 * q_factor)
_a : str = 1_0 ** (gain_db / 4_0)
_a : List[Any] = (big_a + 1) - (big_a - 1) * _cos
_a : Union[str, Any] = (big_a + 1) + (big_a - 1) * _cos
_a : List[Any] = (big_a - 1) - (big_a + 1) * _cos
_a : List[str] = (big_a - 1) + (big_a + 1) * _cos
_a : Union[str, Any] = 2 * sqrt(A ) * alpha
_a : Optional[Any] = big_a * (ppmc + aaa)
_a : List[str] = -2 * big_a * pmpc
_a : Any = big_a * (ppmc - aaa)
_a : List[Any] = pmc + aaa
_a : Tuple = 2 * mpc
_a : List[Any] = pmc - aaa
_a : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 120 | 1 |
from __future__ import annotations
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 334 |
from __future__ import annotations
from typing import Any
class lowercase_ :
def __init__( self: Tuple, _lowercase: int):
'''simple docstring'''
__lowerCAmelCase = num_of_nodes
__lowerCAmelCase = []
__lowerCAmelCase = {}
def _lowercase ( self: str, _lowercase: int, _lowercase: int, _lowercase: int):
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight])
def _lowercase ( self: Optional[Any], _lowercase: int):
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def _lowercase ( self: Any, _lowercase: int):
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowerCAmelCase = self.find_component(_lowercase)
def _lowercase ( self: Tuple, _lowercase: list[int], _lowercase: int, _lowercase: int):
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
__lowerCAmelCase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase)
elif component_size[u_node] >= component_size[v_node]:
__lowerCAmelCase = self.find_component(_lowercase)
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase)
def _lowercase ( self: Optional[Any]):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
__lowerCAmelCase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = edge
__lowerCAmelCase = self.m_component[u]
__lowerCAmelCase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowerCAmelCase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase, _lowercase):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = edge
__lowerCAmelCase = self.m_component[u]
__lowerCAmelCase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase, _lowercase, _lowercase)
print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''')
num_of_components -= 1
__lowerCAmelCase = [-1] * self.m_num_of_nodes
print(f'''The total weight of the minimal spanning tree is: {mst_weight}''')
def UpperCAmelCase ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 334 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
snake_case : Dict = logging.get_logger(__name__)
snake_case : List[str] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
snake_case : Tuple = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
snake_case : Union[str, Any] = {
'RUCAIBox/mvp': 10_24,
}
class __lowercase ( UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE : Dict = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace',
                 bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>',
                 unk_token='<unk>', pad_token='<pad>', mask_token='<mask>',
                 add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """Mask token; logs an error and returns None if it has not been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.'
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.'
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
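
# Usage sketch (editor's addition): loading the published checkpoint needs
# network access to the HuggingFace Hub; the class and checkpoint names come
# from the constants defined above.
#
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     batch = tokenizer(["Summarize: MVP is a supervised pre-trained model."])
#     batch["input_ids"]  # token ids wrapped in <s> ... </s>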
| 605 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva  # OpenCV; aliased to match the call sites below
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cva.imwrite(f'{file_root}.jpg', new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}')
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj)
        with open(f'{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
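
# Worked example (editor's addition) of the YOLO-format conversion above,
# with illustrative values that are not part of the original script:
#
#     anno = [0, 0.10, 0.20, 0.50, 0.60]   # class, xmin, ymin, xmax, ymax (relative)
#     width, height = 0.40, 0.40           # 0.50 - 0.10, 0.60 - 0.20
#     x_center, y_center = 0.30, 0.40      # xmin + width / 2, ymin + height / 2
#     # serialized annotation line -> "0 0.3 0.4 0.4 0.4"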
def get_dataset(label_dir: str, img_dir: str) -> tuple:
    """Reads every annotation file in label_dir and pairs it with its image in img_dir."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple:
    """Combines 4 images into one mosaic image and rescales their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generates a random lowercase-alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
| 605 | 1 |
import requests
__lowercase :Optional[Any] = "" # <-- Put your OpenWeatherMap appid here!
__lowercase :Any = "https://api.openweathermap.org/data/2.5/"
def UpperCAmelCase ( _lowerCamelCase : Dict = "Chicago" , _lowerCamelCase : str = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] = "Kolkata, India" , _lowerCamelCase : Union[str, Any] = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] = 5_5.6_8 , _lowerCamelCase : Dict = 1_2.5_7 , _lowerCamelCase : Tuple = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__lowercase :Tuple = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break | 707 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """Initializes the network with random weights for every layer and a
        zeroed predicted-output array."""
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        """Propagates the input forward through both hidden layers and returns
        the output-layer activations."""
        # Layer connecting the input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """Fine-tunes the weights based on the error rate from the previous
        epoch (iteration), using the derivative of the sigmoid activation."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Runs feedforward and back propagation for the given number of
        iterations, updating the weights each time; optionally prints the loss."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        """Predicts the thresholded (0/1) output for a single input vector."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid activation function element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Provides the derivative value of the sigmoid function."""
    return (value) * (1 - (value))
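
# Note (editor's addition): sigmoid_derivative expects the *activated* value
# a = sigmoid(x), since d/dx sigmoid(x) = a * (1 - a); passing a raw
# pre-activation here would give a wrong gradient in back_propagation above.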
def example() -> int:
    """Trains the two-hidden-layer network on a small truth table and predicts
    the output for the input (1, 1, 1)."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example() | 26 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
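
# Usage sketch (editor's addition; needs scikit-learn and scipy installed,
# and the arrays are illustrative):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("mrpc", preds, labels)
#     # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}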
| 445 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node
    @property
    def has_loop(self) -> bool:
        """True if the exact same node appears more than once while iterating."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
snake_case : int = Node(1)
snake_case : int = Node(2)
snake_case : Optional[int] = Node(3)
snake_case : List[str] = Node(4)
print(root_node.has_loop) # False
snake_case : Optional[Any] = root_node.next_node
print(root_node.has_loop) # True
snake_case : Dict = Node(5)
snake_case : List[Any] = Node(6)
snake_case : Optional[int] = Node(5)
snake_case : Union[str, Any] = Node(6)
print(root_node.has_loop) # False
snake_case : Tuple = Node(1)
print(root_node.has_loop) # False
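
    # Design note (editor's addition): the visited-list check in __iter__ is
    # O(n^2) time and O(n) space; Floyd's tortoise-and-hare cycle detection
    # would bring this down to O(n) time and O(1) space if needed.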
| 445 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 721 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: returns the maximum profit obtainable for
    the given item profits/weights and a weight limit."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # neutralize the used ratio so .index() finds the next occurrence
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
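
    # Worked example (editor's addition): calc_profit([10, 20, 30], [1, 3, 5], 7)
    # takes items 0 and 1 whole (weight 4, profit 30) plus 3/5 of item 2,
    # returning 30 + 0.6 * 30 = 48.0.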
| 571 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any=False ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _snake_case ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any=False ) -> List[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase = ''
else:
lowerCAmelCase = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase = in_proj_bias[: config.hidden_size]
lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase = in_proj_bias[-config.hidden_size :]
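# Note (editor's addition): timm stores query/key/value as one fused qkv
# projection of shape (3 * hidden_size, hidden_size); the three slices above
# split it back into the separate q/k/v matrices expected by the transformers
# DeiT implementation.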
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase = val
def _snake_case ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = DeiTConfig()
# all deit models have fine-tuned heads
lowerCAmelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCAmelCase = 1_000
lowerCAmelCase = 'huggingface/label-files'
lowerCAmelCase = 'imagenet-1k-id2label.json'
lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
lowerCAmelCase = int(deit_name[-6:-4] )
lowerCAmelCase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
lowerCAmelCase = 192
lowerCAmelCase = 768
lowerCAmelCase = 12
lowerCAmelCase = 3
elif deit_name[9:].startswith("""small""" ):
lowerCAmelCase = 384
lowerCAmelCase = 1_536
lowerCAmelCase = 12
lowerCAmelCase = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
lowerCAmelCase = 1_024
lowerCAmelCase = 4_096
lowerCAmelCase = 24
lowerCAmelCase = 16
# load original model from timm
lowerCAmelCase = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase = timm_model.state_dict()
lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
lowerCAmelCase = DeiTForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCAmelCase = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCAmelCase = DeiTImageProcessor(size=SCREAMING_SNAKE_CASE__ , crop_size=config.image_size )
lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase = encoding['pixel_values']
lowerCAmelCase = model(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase = timm_model(SCREAMING_SNAKE_CASE__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path) | 433 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowerCamelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def a_ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
for attribute in key.split('.' ):
_lowerCamelCase : Optional[Any] =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
_lowerCamelCase : str =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
_lowerCamelCase : Optional[Any] =hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_lowerCamelCase : Tuple =value
elif weight_type == "weight_g":
_lowerCamelCase : Any =value
elif weight_type == "weight_v":
_lowerCamelCase : Any =value
elif weight_type == "bias":
_lowerCamelCase : Dict =value
else:
_lowerCamelCase : Union[str, Any] =value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def a_ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] =[]
_lowerCamelCase : Optional[Any] =fairseq_model.state_dict()
_lowerCamelCase : Tuple =hf_model.feature_extractor
_lowerCamelCase : int =hf_model.adapter
for name, value in fairseq_dict.items():
_lowerCamelCase : Optional[Any] =False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == 'group' , )
_lowerCamelCase : Optional[int] =True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : int =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_lowerCamelCase : Dict =True
if "*" in mapped_key:
_lowerCamelCase : List[str] =name.split(SCREAMING_SNAKE_CASE__ )[0].split('.' )[-2]
_lowerCamelCase : Dict =mapped_key.replace('*' , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
_lowerCamelCase : List[Any] ='weight_g'
elif "weight_v" in name:
_lowerCamelCase : int ='weight_v'
elif "bias" in name:
_lowerCamelCase : int ='bias'
elif "weight" in name:
_lowerCamelCase : Optional[int] ='weight'
else:
_lowerCamelCase : Any =None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a_ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
_lowerCamelCase : str =full_name.split('conv_layers.' )[-1]
_lowerCamelCase : List[Any] =name.split('.' )
_lowerCamelCase : Optional[int] =int(items[0] )
_lowerCamelCase : List[Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_lowerCamelCase : Any =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_lowerCamelCase : int =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_lowerCamelCase : List[Any] =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_lowerCamelCase : Tuple =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def a_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[int] =full_name.split('adaptor.' )[-1]
_lowerCamelCase : str =name.split('.' )
if items[1].isdigit():
_lowerCamelCase : Any =int(items[1] )
else:
_lowerCamelCase : int =None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_lowerCamelCase : str =value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_lowerCamelCase : str =value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_lowerCamelCase : Optional[Any] =value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_lowerCamelCase : Optional[Any] =value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_lowerCamelCase : List[Any] =value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_lowerCamelCase : Dict =value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def a_ ( SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : List[Any] =emb.weight.shape
_lowerCamelCase : str =nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Optional[Any] =emb.weight.data
return lin_layer
@torch.no_grad()
def a_ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , ):
'''simple docstring'''
_lowerCamelCase : int =WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE__ , add_adapter=SCREAMING_SNAKE_CASE__ , adapter_stride=SCREAMING_SNAKE_CASE__ , adapter_kernel_size=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , output_hidden_size=SCREAMING_SNAKE_CASE__ , )
_lowerCamelCase : str =MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
# load model
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
_lowerCamelCase : Any =model[0].eval()
# load feature extractor
_lowerCamelCase : List[Any] =WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ )
# set weights for wav2vec2 encoder
_lowerCamelCase : List[Any] =WavaVecaModel(SCREAMING_SNAKE_CASE__ )
recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE__ )
# load decoder weights
_lowerCamelCase : int =MBartForCausalLM(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE__ )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_lowerCamelCase : Dict =SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : List[Any] =False
_lowerCamelCase : Any =MBartaaTokenizer(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : int =hf_wavavec.config.to_dict()
_lowerCamelCase : Optional[int] =tokenizer.pad_token_id
_lowerCamelCase : Tuple =tokenizer.bos_token_id
_lowerCamelCase : Any =tokenizer.eos_token_id
_lowerCamelCase : int ='mbart50'
_lowerCamelCase : Tuple ='wav2vec2'
_lowerCamelCase : Tuple =tokenizer.eos_token_id
_lowerCamelCase : Dict =250_004
_lowerCamelCase : List[str] =tokenizer.eos_token_id
_lowerCamelCase : Dict =SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE__ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=10_24, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_00_04, type=int, help='`decoder_start_token_id` of model config')
lowerCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
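
    # Example invocation (editor's addition; paths are hypothetical and the
    # script requires `fairseq` plus the original checkpoint/config files):
    #
    #     python convert_wav2vec2_mbart50.py \
    #         --checkpoint_path ./xlsr53_56k.pt \
    #         --dict_path ./dict.mbart50.txt \
    #         --config_yaml_path ./config.yaml \
    #         --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50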
| 464 | 0 |
'''simple docstring'''
import math
import sys
import cv2 as cva  # OpenCV; aliased to match the call sites below
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Applies the gaussian function element-wise to a matrix."""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Returns the square neighborhood of (x, y) with the given kernel size."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Creates a gaussian kernel of the given dimension."""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Applies a bilateral filter of the given kernel size to a grayscale image."""
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
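
# Note (editor's addition): each output pixel above is a normalized sum of
# neighborhood pixels weighted by w = G_spatial(distance) * G_intensity(I_p - I_center),
# which is the standard bilateral-filter kernel (edge-preserving smoothing).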
def parse_args(args: list) -> tuple:
    """Reads filename, spatial variance, intensity variance and kernel size
    from argv, falling back to defaults."""
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
cva.imshow("output image", out)
cva.waitKey(0)
cva.destroyAllWindows()
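
    # Example invocation (editor's addition; the image path is the script's
    # default and may not exist on your machine):
    #
    #     python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5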
| 703 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """Number of distinct ways to climb a staircase of number_of_steps steps,
    taking 1 or 2 steps at a time (LeetCode No. 70)."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
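
    # Worked example (editor's addition): climb_stairs(4) == 5, counting the
    # step sequences 1+1+1+1, 1+1+2, 1+2+1, 2+1+1 and 2+2.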
| 514 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def A ( self : Tuple , A_ : Any=0 )-> Union[str, Any]:
__UpperCamelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(A_ ) )
__UpperCamelCase = np.random.RandomState(A_ )
__UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
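
    # Note (editor's addition): the fixture above seeds both the latent image
    # and the NumPy RandomState, which is what makes the hard-coded
    # expected_slice values in the tests below reproducible.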
def A ( self : str )-> List[str]:
__UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs()
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
__UpperCamelCase = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def A ( self : List[str] )-> int:
__UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs()
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__UpperCamelCase = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def A ( self : List[str] )-> Dict:
__UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
__UpperCamelCase = pipe(**self.get_dummy_inputs() )
__UpperCamelCase = self.get_dummy_inputs()
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__UpperCamelCase = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def A ( self : Tuple )-> Tuple:
__UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs()
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__UpperCamelCase = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def A ( self : Any )-> Optional[Any]:
__UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs()
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__UpperCamelCase = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def A ( self : Any )-> int:
__UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs()
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
__UpperCamelCase = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
__UpperCamelCase = ort.SessionOptions()
__UpperCamelCase = False
return options
def A ( self : Optional[Any] )-> Optional[int]:
__UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__UpperCamelCase = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
__UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = "A fantasy landscape, trending on artstation"
__UpperCamelCase = np.random.RandomState(0 )
__UpperCamelCase = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type="np" , )
__UpperCamelCase = output.images
__UpperCamelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
__UpperCamelCase = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def A ( self : Any )-> Union[str, Any]:
__UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__UpperCamelCase = init_image.resize((7_68, 5_12) )
__UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
__UpperCamelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = "A fantasy landscape, trending on artstation"
__UpperCamelCase = np.random.RandomState(0 )
__UpperCamelCase = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type="np" , )
__UpperCamelCase = output.images
__UpperCamelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
__UpperCamelCase = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 505 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers*
    library), derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
def A ( self : List[str] )-> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A ( self : Any , A_ : List[Any] )-> List[Any]:
__UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else value
__UpperCamelCase = value
def A ( self : str , *A_ : Dict , **A_ : Dict )-> BatchEncoding:
__UpperCamelCase = kwargs.get("is_split_into_words" , A_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*A_ , **A_ )
def A ( self : Tuple , *A_ : str , **A_ : List[str] )-> BatchEncoding:
__UpperCamelCase = kwargs.get("is_split_into_words" , A_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*A_ , **A_ )
def A ( self : Optional[int] , A_ : str , A_ : Optional[str] = None )-> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def A ( self : Any , A_ : Dict , A_ : Dict=None )-> Union[str, Any]:
__UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : Optional[int] , A_ : List[int] , A_ : Optional[List[int]] = None )-> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 505 | 1 |
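# A small sketch of the special-token layout produced by build_inputs_with_special_tokens above;
# "RUCAIBox/mvp" is the checkpoint referenced in the pretrained maps at the top of this file.
if __name__ == "__main__":
    tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    single = tokenizer("Hello world")["input_ids"]
    pair = tokenizer("Hello world", "How are you?")["input_ids"]
    # single sequence: <s> A </s>; sequence pair: <s> A </s> </s> B </s>
    print(tokenizer.convert_ids_to_tokens(single))
    print(tokenizer.convert_ids_to_tokens(pair))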
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
__snake_case : Any = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__snake_case : List[Any] = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__snake_case : Dict = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
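# A hedged usage sketch for the metric above. Each prediction/reference document is a list of
# CoNLL-formatted lines (see the inputs description for a full doctest with real lines); the
# keyword options shown mirror the _compute signature:
#
#   coval = datasets.load_metric("coval")
#   results = coval.compute(
#       predictions=[sys_doc_lines],      # list of documents, each a list of CoNLL lines
#       references=[key_doc_lines],
#       keep_singletons=True,             # set False to drop size-1 chains before scoring
#       NP_only=False,                    # score NP mentions only
#       min_span=False,                   # requires gold parse annotation in the references
#   )
#   print(results["conll_score"])         # average F1 of MUC, B-cubed and CEAFe, scaled by 100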
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__snake_case : Optional[Any] = '\\n\n'
__snake_case : List[Any] = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__snake_case : Tuple = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # perplexity = exp of the token-averaged negative log-likelihood
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
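# The computation above reduces to exp of the token-averaged negative log-likelihood.
# A minimal hand-rolled sketch for a single unpadded sequence (the "gpt2" model id is
# illustrative); imports reuse those at the top of this file:
if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("gpt2")
    lm = AutoModelForCausalLM.from_pretrained("gpt2")
    input_ids = tok("lorem ipsum", return_tensors="pt").input_ids
    with torch.no_grad():
        nll = lm(input_ids, labels=input_ids).loss  # mean NLL over predicted tokens
    print(torch.exp(nll).item())  # perplexity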
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
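# Example invocation of the conversion script (the script filename and all paths below
# are placeholders, not fixed names):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/mobilebert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin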
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (Michel et al., http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some to zero) in order of increasing importance until the score drops below a threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove their weights) based on the head mask and compare timing and score."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
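# Example invocation of this head-importance script (the script filename and data path are
# placeholders; --data_dir must point at a whitespace-separated file of token ids loadable
# with np.loadtxt):
#
#   python run_gpt2_head_importance.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruned_gpt2 \
#       --batch_size 4 \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1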
import datasets
a_ : Union[str, Any] = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
a_ : Union[str, Any] = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
a_ : Tuple = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
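# Usage sketch with illustrative label values (the metric is plain accuracy over labels):
if __name__ == "__main__":
    xnli_metric = datasets.load_metric("xnli")
    results = xnli_metric.compute(predictions=[0, 2, 1, 1], references=[0, 1, 1, 1])
    print(results)  # {'accuracy': 0.75}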
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_pretrained(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_backbone_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
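# A sketch of the timm/transformers equivalence the tests above exercise; the checkpoints are
# the same ones used in test_timm_transformer_backbone_equivalence.
if __name__ == "__main__":
    timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
    hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
    # Both expose the same stage channels for the requested indices.
    print(timm_backbone.channels, hf_backbone.channels)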
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
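# This reader backs `Dataset.from_generator`. A minimal usage sketch:
if __name__ == "__main__":
    from datasets import Dataset

    def gen():
        for i in range(3):
            yield {"id": i, "text": f"example {i}"}

    ds = Dataset.from_generator(gen)
    print(ds[0])  # {'id': 0, 'text': 'example 0'}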
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
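# Usage sketch: this builder backs `load_dataset("json", ...)` for JSON Lines files and for
# single JSON documents whose records live under one key (passed via `field=`); the file
# names below are placeholders.
if __name__ == "__main__":
    from datasets import load_dataset

    ds_lines = load_dataset("json", data_files="data.jsonl")  # one JSON object per line
    ds_field = load_dataset("json", data_files="data.json", field="data")  # {"data": [{...}, ...]}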
from importlib import import_module
from .logging import get_logger
__snake_case = get_logger(__name__)
class UpperCAmelCase :
def __init__( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ):
"""simple docstring"""
UpperCamelCase = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , __magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
UpperCamelCase = module._original_module if isinstance(__magic_name__ , _PatchedModuleObj ) else module
class UpperCAmelCase :
lowercase = []
    def __init__(self, obj, target: str, new, attrs=None):
        """simple docstring"""
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        """simple docstring"""
        *submodules, target_attr = self.target.split(".")
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.')
    def __exit__(self, *exc_info):
        """simple docstring"""
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))
    def start(self):
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
    def stop(self):
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
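# A hedged, self-contained demo of the patcher above (illustrative only, not
# part of the datasets API): build a throwaway module object that "imported"
# os, then patch os.path.join as seen from it and restore it on exit.
def _patch_submodule_demo():
    import os
    import types

    demo = types.ModuleType("demo")
    demo.os = os  # simulates `import os` inside the module
    with patch_submodule(demo, "os.path.join", lambda *p: "|".join(p)):
        assert demo.os.path.join("a", "b") == "a|b"  # patched view
    assert demo.os.path.join("a", "b") == os.path.join("a", "b")  # restored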
| 718 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """FlavaImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images : Optional[ImageInput] = None , text : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = False , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_image_mask : Optional[bool] = None , return_codebook_pixels : Optional[bool] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
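def _flava_processor_demo():
    # Hedged usage sketch of the processor defined above; "facebook/flava-full"
    # is a real checkpoint id, but this needs network access and Pillow, so it
    # is only run on demand.
    from PIL import Image

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    image = Image.new("RGB", (224, 224))
    return processor(text=["a photo of a cat"], images=image, return_tensors="pt")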
| 181 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2_0_4_8,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation : "Conversation" ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer:
    '''simple docstring'''
    def __init__( self , vocab , ids_to_tokens , emoji ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self: Optional[int] ):
return len(self.ids_to_tokens )
    def clean_text( self , content ):
        content = self.content_repatter1.sub('<URL>' , content )
        content = self.content_repatter2.sub('<EMAIL>' , content )
        content = self.content_repatter3.sub('<TEL>' , content )
        content = self.content_repatter4.sub('<DATE>' , content )
        content = self.content_repatter5.sub('<DATE>' , content )
        content = self.content_repatter6.sub('<PRICE>' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
        return content
    def tokenize( self , text , clean=False ):
        text = text.replace(' ' , '<SP>' )
        text = text.replace('　' , '<SP>' )
        text = text.replace('\r\n' , '<BR>' )
        text = text.replace('\n' , '<BR>' )
        text = text.replace('\r' , '<BR>' )
        text = text.replace('\t' , '<TAB>' )
        text = text.replace('—' , 'ー' )
        text = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc2a1 and c <= 0Xc2bf)
                    or (c >= 0Xc780 and c <= 0Xc783)
                    or (c >= 0Xcab9 and c <= 0Xcbbf)
                    or (c >= 0Xcc80 and c <= 0Xcda2)
                ):
                    return True
            return False
        def checku2e(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe2_8080 and c <= 0Xe2_b07f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checku2e(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
        text = ''.join(words )
        return text
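def _byte_fallback_demo():
    # Hedged sketch (not part of the original file) of the <|byteN|> fallback
    # used by the tokenizer above: any UTF-8 text can be emitted byte-by-byte
    # and reassembled losslessly with bytearray().decode().
    text = "日本語"
    tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
    decoded = bytearray(int(t[6:-2]) for t in tokens).decode("utf-8")
    assert decoded == text
    return tokens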
| 669 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
    def test_check_encoding_slow_fast( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
__lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
__lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
__lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
__lowerCamelCase : Any = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of space type
__lowerCamelCase : List[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
__lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of line break type
__lowerCamelCase : str = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__lowerCamelCase : Dict = tokenizer_s.tokenize(a )
__lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
    def test_offsets_mapping_with_different_add_prefix_space_argument( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
__lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
__lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase : List[Any] = F' {text}'
__lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
__lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
    def test_log_warning( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(a ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
    def test_tokenization_python_rust_equals( self ):
super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case( self ):
# CLIP always lower cases letters
pass
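def _offset_mapping_demo():
    # Hedged sketch (not part of the original test file) of what the offset
    # tests above check: a fast tokenizer can return (start, end) character
    # spans per token. Needs network access for the real checkpoint.
    from transformers import CLIPTokenizerFast

    tok = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    return enc.offset_mapping  # expected to look like [(0, 5), (6, 11)]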
| 669 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
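# Illustrative sketch of the mechanism behind `_LazyModule` (NOT the real
# transformers implementation): defer submodule imports until an attribute
# of the package is first accessed.
import types as _types
from importlib import import_module as _import_module


class _TinyLazyModule(_types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # import the submodule lazily, on first attribute access
                return getattr(_import_module(f".{submodule}", self.__name__), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")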
| 529 |
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    '''Swap array[index1] and array[index2] if they are out of order for `direction`.'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    '''Recursively merge a bitonic sequence into sorted order.'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    '''Sort array[low:low+length] ascending (direction=1) or descending (direction=0).'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
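def _bitonic_demo() -> None:
    # Hedged sanity check (not in the original file) for the functions above;
    # bitonic sort requires the input length to be a power of two.
    data = [8, 3, 5, 1, 7, 2, 6, 4]
    bitonic_sort(data, 0, len(data), 1)
    assert data == [1, 2, 3, 4, 5, 6, 7, 8]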
if __name__ == "__main__":
a = input('Enter numbers separated by a comma:\n').strip()
a = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 529 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class _lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer( self ):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
__snake_case = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__snake_case = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def a ( self : List[str] ) -> Tuple:
__snake_case = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__snake_case = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = tempfile.mkdtemp()
__snake_case = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__snake_case = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
__snake_case = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
__snake_case = tempfile.mkdtemp()
__snake_case = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
__snake_case = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
__snake_case = tempfile.mkdtemp()
__snake_case = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
def a ( self : Dict ) -> Optional[Any]:
if not self.test_seqaseq:
return
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
__snake_case = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__snake_case = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__snake_case = tokenizer.prepare_seqaseq_batch(
src_texts=SCREAMING_SNAKE_CASE_ , tgt_texts=SCREAMING_SNAKE_CASE_ , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__snake_case = tokenizer.prepare_seqaseq_batch(
SCREAMING_SNAKE_CASE_ , tgt_texts=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__snake_case = tokenizer.prepare_seqaseq_batch(
src_texts=SCREAMING_SNAKE_CASE_ , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE_ )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def a ( self : Any ) -> Union[str, Any]:
pass
def a ( self : str ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__snake_case = [AddedToken('<special>' , lstrip=SCREAMING_SNAKE_CASE_ )]
__snake_case = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer_r.encode('Hey this is a <special> token' )
__snake_case = tokenizer_r.encode('<special>' , add_special_tokens=SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__snake_case = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__snake_case = self.tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer_p.encode('Hey this is a <special> token' )
__snake_case = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "facebook/nllb-200-distilled-600M"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
    @classmethod
    def setUpClass( cls ):
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
        cls.pad_token_id = 1
        return cls
def a ( self : int ) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def a ( self : Optional[int] ) -> int:
__snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def a ( self : Tuple ) -> Union[str, Any]:
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
# fmt: off
__snake_case = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__snake_case = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
__snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> Union[str, Any]:
__snake_case = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
__snake_case = 10
__snake_case = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def a ( self : str ) -> Tuple:
__snake_case = tempfile.mkdtemp()
__snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = NllbTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def a ( self : str ) -> List[Any]:
__snake_case = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__snake_case = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self : int ) -> List[Any]:
__snake_case = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
__snake_case = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
__snake_case = targets['input_ids']
__snake_case = shift_tokens_right(
SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self : Union[str, Any] ) -> Any:
__snake_case = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def a ( self : List[str] ) -> Union[str, Any]:
__snake_case = True
__snake_case = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__snake_case = False
__snake_case = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
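def _shift_tokens_right_sketch(labels, pad_token_id, decoder_start_token_id):
    # Hedged, list-based sketch (not part of the original test file) of what
    # `shift_tokens_right` does for M2M/NLLB-style models: prepend the decoder
    # start token, drop the last position, and map -100 padding back to
    # pad_token_id. The real version operates on torch tensors.
    shifted = [[decoder_start_token_id] + row[:-1] for row in labels]
    return [[pad_token_id if tok == -100 else tok for tok in row] for row in shifted]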
| 56 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure['modeling_maskformer_swin'] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 365 | 0 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 706 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
snake_case__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
snake_case__ = [0, 25, 50]
snake_case__ = [25, 50, 75]
snake_case__ = fuzz.membership.trimf(X, abca)
snake_case__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
snake_case__ = np.ones(75)
snake_case__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
snake_case__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
snake_case__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
snake_case__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case__ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
snake_case__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
snake_case__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
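    # Hedged sanity checks (not in the original script): with a shared universe
    # X, union/intersection/complement reduce to pointwise max/min/(1 - x).
    assert np.allclose(union, np.maximum(young, middle_aged))
    assert np.allclose(intersection, np.minimum(young, middle_aged))
    assert np.allclose(complement_a, 1 - young)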
    # max-min composition
    # max-product composition
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt
    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 638 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition( TaskTemplate ):
    task: str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'transcription': Value('string' )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self , features):
        '''simple docstring'''
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''')
        if not isinstance(features[self.audio_column] , Audio):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''')
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping( self) -> Dict[str, str]:
        '''simple docstring'''
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
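def _asr_template_demo():
    # Hedged usage sketch (not part of the original file) for the task template
    # above; Features/Audio/Value are the real datasets classes imported at the
    # top of this file.
    features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
    template = AutomaticSpeechRecognition()
    aligned = template.align_with_features(features)
    return aligned.column_mapping  # {"audio": "audio", "transcription": "transcription"}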
| 19 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''efficientformer'''
    def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [48, 96, 224, 448] , downsamples : List[bool] = [True, True, True, True] , dim : int = 448 , key_dim : int = 32 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 16 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1e-5 , hidden_act : str = "gelu" , initializer_range : float = 0.0_2 , layer_norm_eps : float = 1e-12 , image_size : int = 224 , batch_norm_eps : float = 1e-05 , **kwargs , ) ->None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 435 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty( self , results ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs( self ):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain( self ):
__UpperCamelCase = "sgugger/tiny-distilbert-classification"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , only_pretrain_model=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript( self ):
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , torchscript=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
    def test_inference_fp16( self ):
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , fpaa=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures( self ):
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
__UpperCamelCase = None
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs( self ):
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
    def test_train_no_configs_fp16( self ):
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A_ , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs( self ):
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs( self ):
__UpperCamelCase = "sshleifer/tinier_bart"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs( self ):
__UpperCamelCase = "sshleifer/tiny-gpt2"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs( self ):
__UpperCamelCase = "sshleifer/tinier_bart"
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ , configs=[config] )
__UpperCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : int )-> Optional[Any]:
__UpperCamelCase = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , save_to_csv=A_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A_ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(A_ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(A_ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(A_ , "train_time.csv" ) , env_info_csv_file=os.path.join(A_ , "env.csv" ) , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , "env.csv" ) ).exists() )
def A ( self : List[Any] )-> str:
__UpperCamelCase = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(A_ : List[str] ):
self.assertTrue(hasattr(A_ , "sequential" ) )
self.assertTrue(hasattr(A_ , "cumulative" ) )
self.assertTrue(hasattr(A_ , "current" ) )
self.assertTrue(hasattr(A_ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A_ , "log.txt" ) , log_print=A_ , trace_memory_line_by_line=A_ , multi_process=A_ , )
__UpperCamelCase = PyTorchBenchmark(A_ )
__UpperCamelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A_ , "log.txt" ) ).exists() )
| 703 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Union[str, Any] )-> Tuple:
__UpperCamelCase = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
__UpperCamelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
__UpperCamelCase = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
__UpperCamelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__UpperCamelCase = model(A_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , A_ , atol=1e-3 ) )
@slow
def A ( self : List[Any] )-> Union[str, Any]:
__UpperCamelCase = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
__UpperCamelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
__UpperCamelCase = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
__UpperCamelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__UpperCamelCase = model(A_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , A_ , atol=1e-3 ) ) | 228 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list, or None if the snippet is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key and its MinHash to the LSH index, growing duplicate clusters as needed."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset and return them as lists of file identifiers."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Find the "extremes" of a cluster: elements that are pairwise less similar than the threshold."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over all clusters, sharing the dataset via a global for multiprocessing."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset, keeping one "extreme" file per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
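# A minimal usage sketch (not part of the original script; the dataset name is
# illustrative). It assumes a `datasets.Dataset` whose rows carry "content",
# "repo_name", and "path" columns, as the helpers above expect:
#
#     from datasets import load_dataset
#     ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#     print(f"kept {len(ds_dedup)} of {len(ds)} files")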
| 235 |
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of a waveform, scaled into [-1.0, 1.0]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
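# A minimal usage sketch (not part of the original module; the one-second
# random waveform is illustrative). The extractor expects mono audio sampled
# at `sampling_rate` (44100 Hz by default):
#
#     import numpy as np
#     extractor = TvltFeatureExtractor()
#     waveform = np.random.randn(44100).astype(np.float32)  # ~1 s of audio
#     inputs = extractor(waveform, sampling_rate=44100, return_tensors="np")
#     print(inputs["audio_values"].shape, inputs["audio_mask"].shape)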
| 235 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
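# A minimal usage sketch (not part of the original module; building the model
# from a fresh config is illustrative -- in practice the weights would be
# loaded from a pretrained M-CLIP checkpoint):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
#     batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#     model = MultilingualCLIP(MCLIPConfig())
#     projected, token_embeddings = model(batch["input_ids"], batch["attention_mask"])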
| 675 |
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


# We will verify the converted model on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the TensorFlow weights into our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
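# Example invocation (added for illustration; the script filename and the
# checkpoint/output paths are assumptions, not part of the original file):
#
#     python convert_mobilenet_v1.py \
#         --model_name mobilenet_v1_1.0_224 \
#         --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#         --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf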
| 675 | 1 |