| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (82 – 54.1k chars) | int64 (0 – 699) | string (111 – 35.6k chars) | int64 (0 – 699) | int64 (0 – 1) |
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence`` in place between indices ``start`` and ``end`` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
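# A short usage sketch (illustrative values) for the in-place sort defined above.
if __name__ == "__main__":
    data = [5, 2, 9, 1, 5, 6]
    slowsort(data)
    print(data)  # [1, 2, 5, 5, 6, 9]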
| 33 |
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
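# A minimal sketch of how the deprecation shim behaves; the bare constructor call
# is illustrative and assumes the default PerceiverImageProcessor arguments suffice.
if __name__ == "__main__":
    import warnings as _warnings

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        PerceiverFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)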
| 33 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(_a , batched=_a )
snake_case__ = image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self:int ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(_a , return_tensors='''pt''' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(_a , return_tensors='''pt''' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
# prepare image and target
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
snake_case__ = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
snake_case__ = image_processing(images=_a , annotations=_a , return_tensors='''pt''' )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , _a )
snake_case__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _a , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _a ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _a )
snake_case__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _a , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _a ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _a ) )
# verify class_labels
snake_case__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _a ) )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _a ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _a ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self:int ):
# prepare image, target and masks_path
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
snake_case__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
snake_case__ = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
snake_case__ = image_processing(images=_a , annotations=_a , masks_path=_a , return_tensors='''pt''' )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , _a )
snake_case__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _a , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _a ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _a )
snake_case__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _a , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _a ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _a ) )
# verify class_labels
snake_case__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _a ) )
# verify masks
snake_case__ = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _a )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _a ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _a ) )
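# The expected [1, 3, 800, 1066] pixel-values shape in the slow tests above follows
# from the shortest-edge resize rule in get_expected_values. A small standalone
# sketch of that arithmetic (the helper name is ours, for illustration only):
def expected_resize(w, h, shortest_edge=800):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


if __name__ == "__main__":
    print(expected_resize(640, 480))  # (800, 1066) for the 640x480 COCO fixture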
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33 | 1 |
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
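# A quick sanity check of the checkpoint regex on a made-up docstring line
# (illustrative input only):
if __name__ == "__main__":
    sample = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
    print(_re_checkpoint.findall(sample))
    # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]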
| 33 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 33 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1,
        activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
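# For intuition, the inputs_to_logits_ratio property above is just the product of
# the convolution strides; with the default conv_stride that is a 320x temporal
# downsampling. A standalone, illustrative check:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320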
| 33 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 33 | 1 |
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
        use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False,
        use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1,
        use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256,
        auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
| 33 | 1 |
# Imports
import numpy as np
class __magic_name__ :
'''simple docstring'''
def __init__( self:List[str] , _a:Union[str, Any]=None , _a:List[Any]=None , _a:Tuple=None , _a:Dict=None , _a:int=None ):
self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Dict=None , _a:Any=None , _a:Optional[int]=None , _a:str=None , _a:Optional[int]=None ):
if red is not None:
snake_case__ = red
if green is not None:
snake_case__ = green
if blue is not None:
snake_case__ = blue
if red_edge is not None:
snake_case__ = red_edge
if nir is not None:
snake_case__ = nir
return True
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any]="" , _a:List[str]=None , _a:Optional[int]=None , _a:List[Any]=None , _a:Any=None , _a:List[str]=None ):
self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a )
snake_case__ = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return self.nir * (self.red / (self.green**2))
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (self.nir - self.red) / (self.nir + self.red)
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return (self.nir - self.green) / (self.nir + self.green)
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Optional[Any]=0.08 , _a:Dict=1.22 , _a:str=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return (self.nir / self.green) - 1
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (self.nir / self.redEdge) - 1
def SCREAMING_SNAKE_CASE__ ( self:int ):
return (self.red - self.blue) / self.red
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return self.nir - self.green
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Any=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Any=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:List[Any]=None , _a:Optional[int]=None ):
return (self.nir - b) / (a * self.red)
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return (self.red + self.green + self.blue) / 30.5
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return self.nir / self.red
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def SCREAMING_SNAKE_CASE__ ( self:int ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return self.green / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
return self.nir / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self:str ):
return self.red / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return (self.green - self.red) / (self.green + self.red)
def SCREAMING_SNAKE_CASE__ ( self:int ):
return (self.red - self.green) / (self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
snake_case__ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return self.nir / self.red
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (self.ndvi() + 0.5) ** (1 / 2)
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
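# Standalone check of the NDVI formula used by the class above,
# (nir - red) / (nir + red), on small made-up reflectance arrays:
red = np.array([0.1, 0.2])
nir = np.array([0.5, 0.6])
print((nir - red) / (nir + red))  # [0.66666667 0.5]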
| 33 |
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
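# For readers unfamiliar with the decorator used above: a minimal sketch of what a
# docstring-prepending decorator like add_start_docstrings does. This is our
# simplified version for illustration, not the transformers implementation.
def add_start_docstrings_sketch(*docstr):
    def decorator(fn):
        fn.__doc__ = "".join(d or "" for d in docstr) + (fn.__doc__ or "")
        return fn

    return decorator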
| 33 | 1 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
if os.path.isdir(self._tfds_path ):
snake_case__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
snake_case__ = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
snake_case__ = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
snake_case__ = []
snake_case__ = []
snake_case__ = {}
if os.path.isdir(self._tfds_path ):
snake_case__ = os.listdir(_a )
else:
snake_case__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
snake_case__ = os.path.join(_a , _a )
snake_case__ = os.path.join(_a , _a )
if not os.path.isfile(_a ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_a , encoding='''utf-8''' ) as f:
snake_case__ = f.readlines()
snake_case__ = []
snake_case__ = False
snake_case__ = False
snake_case__ = []
for line in lines:
snake_case__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
snake_case__ = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
snake_case__ = ''''''
continue
elif "from absl import logging" in out_line:
snake_case__ = '''from datasets import logging\n'''
elif "getLogger" in out_line:
snake_case__ = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
snake_case__ = True
snake_case__ = list(filter(lambda _a : e in out_line , _a ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_a ) + '''\n''' )
out_lines.append(_a )
out_lines.append(_a )
continue
else:
for pattern, replacement in TO_CONVERT:
snake_case__ = re.sub(_a , _a , _a )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
snake_case__ = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _a )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
snake_case__ = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
snake_case__ = True
out_lines.append(_a )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
snake_case__ = f_name.replace('''.py''' , '''''' )
snake_case__ = os.path.join(_a , _a )
snake_case__ = os.path.join(_a , _a )
os.makedirs(_a , exist_ok=_a )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_a )
if needs_manual_update:
with_manual_update.append(_a )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_a )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
snake_case__ = os.path.basename(_a )
snake_case__ = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(_a , _a )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 33 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 33 | 1 |
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
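# Usage sketch, relying on the function name as reconstructed above: the points
# lie on y = x**2, so interpolating at 2.5 yields 6.25 exactly (degree <= 3).
if __name__ == "__main__":
    value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)
    print(value)  # 6.25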
| 33 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
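# Quick numeric check of the two activations; the SiLU name sigmoid_linear_unit
# is our reconstruction of the mangled identifier above.
if __name__ == "__main__":
    v = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(v))              # [0.26894142 0.5        0.73105858]
    print(sigmoid_linear_unit(v))  # [-0.26894142  0.          0.73105858]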
| 33 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase__ : Optional[int] = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
lowerCamelCase__ : Optional[Any] = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = RealmTokenizer
def __init__( self:Dict , _a:List[str]=None , _a:Union[str, Any]=None , _a:Dict=True , _a:List[Any]="[UNK]" , _a:Any="[SEP]" , _a:Union[str, Any]="[PAD]" , _a:int="[CLS]" , _a:Tuple="[MASK]" , _a:List[Any]=True , _a:str=None , **_a:Any , ):
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
snake_case__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _a ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _a ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _a ) != tokenize_chinese_chars
):
snake_case__ = getattr(_a , normalizer_state.pop('''type''' ) )
snake_case__ = do_lower_case
snake_case__ = strip_accents
snake_case__ = tokenize_chinese_chars
snake_case__ = normalizer_class(**_a )
snake_case__ = do_lower_case
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:int , **_a:int ):
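        # Encodes each candidate text (optionally paired) one entry at a time,
        # padding every encoding to max_length so the per-candidate results can
        # be stacked into fixed-shape batches.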
snake_case__ = PaddingStrategy.MAX_LENGTH
snake_case__ = text
snake_case__ = kwargs.pop('''text_pair''' , _a )
snake_case__ = kwargs.pop('''return_tensors''' , _a )
snake_case__ = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(_a ):
if batch_text_pair is not None:
snake_case__ = batch_text_pair[idx]
else:
snake_case__ = None
snake_case__ = super().__call__(_a , _a , return_tensors=_a , **_a )
snake_case__ = encoded_candidates.get('''input_ids''' )
snake_case__ = encoded_candidates.get('''attention_mask''' )
snake_case__ = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_a )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_a )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_a )
snake_case__ = {key: item for key, item in output_data.items() if len(_a ) != 0}
return BatchEncoding(_a , tensor_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:int , _a:int=None ):
snake_case__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:str , _a:Optional[str] = None ):
snake_case__ = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
| 33 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
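    # Project Euler problem 29: count the distinct terms a**b for 2 <= a, b <= n.
    # A set absorbs duplicate powers, so its final size is the answer.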
snake_case__ = set()
snake_case__ = 0
snake_case__ = n + 1 # maximum limit
for a in range(2 , __lowerCAmelCase ):
for b in range(2 , __lowerCAmelCase ):
snake_case__ = a**b # calculates the current power
collect_powers.add(__lowerCAmelCase ) # adds the result to the set
return len(__lowerCAmelCase )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 1 |
import argparse
import os
import re
lowerCamelCase__ : Optional[int] = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase__ : List[Any] = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
lowerCamelCase__ : Dict = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = False ) -> Union[str, Any]:
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
snake_case__ = f.read()
snake_case__ = content.split('''\n''' )
snake_case__ = []
snake_case__ = 0
while line_idx < len(__lowerCAmelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
snake_case__ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
snake_case__ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
snake_case__ = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
snake_case__ = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : _re_identifier.search(__lowerCAmelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(__lowerCAmelCase ) )
elif "\n".join(__lowerCAmelCase ) != content:
return True
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = False ) -> Tuple:
snake_case__ = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for f in os.listdir(__lowerCAmelCase ) if f.endswith('''.py''' )]
snake_case__ = [sort_auto_mapping(__lowerCAmelCase , overwrite=__lowerCAmelCase ) for fname in fnames]
if not overwrite and any(__lowerCAmelCase ):
snake_case__ = [f for f, d in zip(__lowerCAmelCase , __lowerCAmelCase ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(__lowerCAmelCase )}. Run `make style` to fix"""
''' this.''' )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCamelCase__ : Union[str, Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 33 |
from copy import deepcopy
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ):
if arr is None and size is not None:
snake_case__ = size
snake_case__ = [0] * size
elif arr is not None:
self.init(_a )
else:
raise ValueError('''Either arr or size must be specified''' )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ):
snake_case__ = len(_a )
snake_case__ = deepcopy(_a )
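        # O(n) construction: seed the tree with the raw values, then push each
        # node's partial sum up to its parent interval (instead of n point updates).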
for i in range(1 , self.size ):
snake_case__ = self.next_(_a )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case__ = self.next_(_a )
if j < self.size:
arr[j] -= arr[i]
return arr
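    # Bit-trick helpers: next_(index) adds the lowest set bit (the next node to
    # touch when updating); prev(index) strips it (the next interval to add
    # when accumulating a prefix sum).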
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ = self.next_(_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
self.add(_a , value - self.get(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
if right == 0:
return 0
snake_case__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ = self.prev(_a )
return result
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
return self.prefix(_a ) - self.prefix(_a )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
return self.query(_a , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
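        # Binary-lifting search: descend powers of two to find the largest index
        # whose prefix sum does not exceed `value` (-1 if none), in O(log n).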
value -= self.tree[0]
if value < 0:
return -1
snake_case__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 100 , ) -> float:
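    # Approximates the arc length of y = fnc(x) over [x_start, x_end] by summing
    # the straight-line (chord) lengths of `steps` uniform segments.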
snake_case__ = x_start
snake_case__ = fnc(__lowerCAmelCase )
snake_case__ = 0.0
for _ in range(__lowerCAmelCase ):
# Approximates curve as a sequence of linear lines and sums their length
snake_case__ = (x_end - x_start) / steps + xa
snake_case__ = fnc(__lowerCAmelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
snake_case__ = xa
snake_case__ = fxa
return length
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowerCamelCase__ : List[str] = 1_0
while i <= 1_0_0_0_0_0:
print(F"""With {i} steps: {line_length(f, -1_0, 1_0, i)}""")
i *= 1_0
| 33 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __magic_name__ :
'''simple docstring'''
__lowercase : int = BlenderbotConfig
__lowercase : Any = {}
__lowercase : Optional[Any] = 'gelu'
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = eos_token_id
snake_case__ = pad_token_id
snake_case__ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
snake_case__ = inputs_dict['''input_ids''']
snake_case__ = input_ids[:1, :]
snake_case__ = inputs_dict['''attention_mask'''][:1, :]
snake_case__ = inputs_dict['''head_mask''']
snake_case__ = 1
# first forward pass
snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
snake_case__ , snake_case__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ = model(_a , attention_mask=_a )[0]
snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
if attention_mask is None:
snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowercase : Tuple = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowercase : Any = True
__lowercase : int = False
__lowercase : int = False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFBlenderbotModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
__lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
snake_case__ = self.model.generate(
model_inputs.input_ids , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
lowerCamelCase__ : Tuple = """bart"""
lowerCamelCase__ : Dict = True
@st.cache(allow_output_mutation=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
if LOAD_DENSE_INDEX:
snake_case__ = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
snake_case__ = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
snake_case__ = qar_model.eval()
else:
snake_case__ , snake_case__ = (None, None)
if MODEL_TYPE == "bart":
snake_case__ = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
snake_case__ = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
snake_case__ = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
snake_case__ = sas_model.eval()
else:
snake_case__ , snake_case__ = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> int:
if LOAD_DENSE_INDEX:
snake_case__ = faiss.StandardGpuResources()
snake_case__ = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
snake_case__ = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
snake_case__ = faiss.IndexFlatIP(128 )
snake_case__ = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase )
wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU
else:
snake_case__ , snake_case__ = (None, None)
snake_case__ = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
snake_case__ = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
snake_case__ = elia['''train_eli5''']
snake_case__ = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
snake_case__ = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__lowerCAmelCase )
return (elia_train, eli5_train_q_index)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = load_indexes()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = load_models()
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = load_train_data()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=10 ) -> List[Any]:
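    # Embed the question with the dense retriever, then return its top-k nearest
    # neighbors from the pre-computed FAISS index over ELI5 training questions.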
snake_case__ = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase )
snake_case__ , snake_case__ = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = [elia_train[int(__lowerCAmelCase )] for i in I[0]]
return nn_examples
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase="wiki40b" , __lowerCAmelCase="dense" , __lowerCAmelCase=10 ) -> int:
if source == "none":
snake_case__ , snake_case__ = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
snake_case__ , snake_case__ = query_qa_dense_index(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
snake_case__ , snake_case__ = query_es_index(
__lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , )
snake_case__ = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
snake_case__ = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None),
} )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=64 , __lowerCAmelCase=256 , __lowerCAmelCase=False , __lowerCAmelCase=2 , __lowerCAmelCase=0.95 , __lowerCAmelCase=0.8 ) -> Dict:
with torch.no_grad():
snake_case__ = qa_sas_generate(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
lowerCamelCase__ : int = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
lowerCamelCase__ : List[Any] = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowerCamelCase__ : Any = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
lowerCamelCase__ : str = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
lowerCamelCase__ : Tuple = st.sidebar.checkbox("""Demo options""")
if demo_options:
lowerCamelCase__ : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
lowerCamelCase__ : Union[str, Any] = action_list.index(action_st)
lowerCamelCase__ : List[Any] = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
lowerCamelCase__ : int = show_type == """Show full text of passages"""
else:
lowerCamelCase__ : str = 3
lowerCamelCase__ : Any = True
lowerCamelCase__ : Tuple = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
lowerCamelCase__ : Optional[int] = """
### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
lowerCamelCase__ : Optional[int] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
lowerCamelCase__ : int = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
lowerCamelCase__ : str = """wiki40b"""
lowerCamelCase__ : str = """dense"""
lowerCamelCase__ : int = """beam"""
lowerCamelCase__ : Optional[int] = 2
lowerCamelCase__ : int = 6_4
lowerCamelCase__ : List[Any] = 2_5_6
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : str = st.sidebar.checkbox("""Generation options""")
if generate_options:
lowerCamelCase__ : int = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
lowerCamelCase__ : List[str] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
lowerCamelCase__ : Optional[Any] = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
lowerCamelCase__ : Optional[int] = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
lowerCamelCase__ : Optional[int] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowerCamelCase__ : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
lowerCamelCase__ : Union[str, Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
lowerCamelCase__ : Dict = None
# start main text
lowerCamelCase__ : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
lowerCamelCase__ : Optional[Any] = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
lowerCamelCase__ : Dict = st.text_input("""Enter your question here:""", """""")
else:
lowerCamelCase__ : Union[str, Any] = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
lowerCamelCase__ , lowerCamelCase__ : List[str] = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
lowerCamelCase__ , lowerCamelCase__ : Any = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
lowerCamelCase__ : Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowerCamelCase__ : Union[str, Any] = support_list[:1_0]
lowerCamelCase__ : Optional[Any] = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
lowerCamelCase__ , lowerCamelCase__ : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
lowerCamelCase__ : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
lowerCamelCase__ : List[Any] = res[1].strip()
if sec_titles == "":
lowerCamelCase__ : int = """[{}]({})""".format(res[0], wiki_url)
else:
lowerCamelCase__ : str = sec_titles.split(""" & """)
lowerCamelCase__ : int = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
lowerCamelCase__ : List[Any] = find_nearest_training(question)
lowerCamelCase__ : int = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
lowerCamelCase__ : Optional[int] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
lowerCamelCase__ : Union[str, Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 33 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = 0
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
snake_case__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
with self.assertRaisesRegex(
_a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : List[Any] = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = ''''''
else:
snake_case__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = dct.pop(__lowerCAmelCase )
snake_case__ = val
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = ViTConfig()
snake_case__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case__ = True
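        # e.g. "vit_base_patch16_224_in21k": the patch size ("16") and image size
        # ("224") sit at fixed offsets from the end of the checkpoint name.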
snake_case__ = int(vit_name[-12:-10] )
snake_case__ = int(vit_name[-9:-6] )
else:
snake_case__ = 1000
snake_case__ = '''huggingface/label-files'''
snake_case__ = '''imagenet-1k-id2label.json'''
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(vit_name[-6:-4] )
snake_case__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif vit_name[9:].startswith('''small''' ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
snake_case__ = 768
snake_case__ = 2304
snake_case__ = 8
snake_case__ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
elif vit_name[4:].startswith('''huge''' ):
snake_case__ = 1280
snake_case__ = 5120
snake_case__ = 32
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ = ViTModel(__lowerCAmelCase ).eval()
else:
snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case__ = DeiTImageProcessor(size=config.image_size )
else:
snake_case__ = ViTImageProcessor(size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ = encoding['''pixel_values''']
snake_case__ = model(__lowerCAmelCase )
if base_model:
snake_case__ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[str] , _a:pyspark.sql.DataFrame , _a:Optional[NamedSplit] = None , _a:Optional[Features] = None , _a:bool = True , _a:str = None , _a:bool = False , _a:str = None , _a:bool = True , _a:str = "arrow" , **_a:List[str] , ):
super().__init__(
split=_a , features=_a , cache_dir=_a , keep_in_memory=_a , streaming=_a , **_a , )
snake_case__ = load_from_cache_file
snake_case__ = file_format
snake_case__ = Spark(
df=_a , features=_a , cache_dir=_a , working_dir=_a , **_a , )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
snake_case__ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
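        # download_mode=None reuses any cached materialization; otherwise the
        # Spark job is forced to re-run and rewrite the dataset files.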
self.builder.download_and_prepare(
download_mode=_a , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 33 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = ['image_processor', 'tokenizer']
__lowercase : str = 'AutoImageProcessor'
__lowercase : Dict = 'AutoTokenizer'
def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case__ = kwargs.pop('''feature_extractor''' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
snake_case__ = self.image_processor
snake_case__ = False
def __call__( self:Optional[int] , *_a:str , **_a:int ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
snake_case__ = kwargs.pop('''images''' , _a )
snake_case__ = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case__ = self.image_processor(_a , *_a , **_a )
if text is not None:
snake_case__ = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
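        # Parses a flat token sequence such as "<s_key>value</s_key>" back into a
        # (possibly nested) dict; "<sep/>" separates sibling leaf values.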
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 33 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[str] , *_a:Tuple , **_a:List[str] ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = num_channels
snake_case__ = embeddings_size
snake_case__ = hidden_sizes
snake_case__ = depths
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_act
snake_case__ = num_labels
snake_case__ = scope
snake_case__ = len(_a )
snake_case__ = out_features
snake_case__ = out_indices
snake_case__ = num_groups
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
snake_case__ = BitModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = BitForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowercase : int = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Optional[Any] = False
__lowercase : str = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = BitModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ = layer_type
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
__lowercase : int = BitConfig
__lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = BitModelTester(self )
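
# Hedged usage sketch of the classification path exercised by the tests above.
# The checkpoint name and the image path are assumptions, not taken from this file.
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained("google/bit-50")      # assumed checkpoint
model = BitForImageClassification.from_pretrained("google/bit-50")
inputs = processor(images=Image.open("cats.png"), return_tensors="pt")  # placeholder image
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])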
| 33 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = logging.get_logger()
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : nn.Module
__lowercase : List[nn.Module] = field(default_factory=snake_case_ )
__lowercase : list = field(default_factory=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:str , _a:Tensor , _a:Tensor ):
snake_case__ = len(list(m.modules() ) ) == 1 or isinstance(_a , nn.Convad ) or isinstance(_a , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_a )
def __call__( self:str , _a:Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_a )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda _a : len(list(_a.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : nn.Module
__lowercase : nn.Module
__lowercase : int = 0
__lowercase : List = field(default_factory=snake_case_ )
__lowercase : List = field(default_factory=snake_case_ )
def __call__( self:Tuple , _a:Tensor ):
snake_case__ = Tracker(self.dest )(_a ).parametrized
snake_case__ = Tracker(self.src )(_a ).parametrized
snake_case__ = list(filter(lambda _a : type(_a ) not in self.src_skip , _a ) )
snake_case__ = list(filter(lambda _a : type(_a ) not in self.dest_skip , _a ) )
if len(_a ) != len(_a ):
raise Exception(
F"""Numbers of operations are different. Source module has {len(_a )} operations while"""
F""" destination module has {len(_a )}.""" )
for dest_m, src_m in zip(_a , _a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True ) -> int:
print(F"""Converting {name}...""" )
with torch.no_grad():
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase ).eval()
snake_case__ = ResNetForImageClassification(__lowerCAmelCase ).eval()
snake_case__ = ModuleTransfer(src=__lowerCAmelCase , dest=__lowerCAmelCase )
snake_case__ = torch.randn((1, 3, 224, 224) )
module_transfer(__lowerCAmelCase )
assert torch.allclose(from_model(__lowerCAmelCase ) , our_model(__lowerCAmelCase ).logits ), "The model logits don't match the original one."
snake_case__ = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(__lowerCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__lowerCAmelCase , )
# we can use the convnext one
snake_case__ = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__lowerCAmelCase , )
print(F"""Pushed {checkpoint_name}""" )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True ) -> List[Any]:
snake_case__ = '''imagenet-1k-id2label.json'''
snake_case__ = 1000
snake_case__ = (1, num_labels)
snake_case__ = '''huggingface/label-files'''
snake_case__ = num_labels
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = partial(__lowerCAmelCase , num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
snake_case__ = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__lowerCAmelCase , names_to_config[model_name] , __lowerCAmelCase , __lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowerCamelCase__ : Tuple = parser.parse_args()
lowerCamelCase__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
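
# A compact sketch of the forward-hook tracing trick the Tracker class above
# relies on (illustrative reimplementation, not the script's exact code): hook
# every leaf module, run one forward pass, and collect modules in execution
# order so two equivalent architectures can be aligned weight-by-weight.
import torch
import torch.nn as nn

def trace_leaves(model: nn.Module, x: torch.Tensor) -> list:
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module with no submodules
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)
    for h in handles:
        h.remove()
    return traced

# trace_leaves(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()), torch.randn(1, 3, 8, 8))
# -> [Conv2d(...), ReLU()]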
| 33 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ):
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
snake_case__ = '''cuda'''
else:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case__ = AutoModelForCausalLM.from_pretrained(_a )
snake_case__ = model.to(_a )
snake_case__ = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ = model.config.max_length - 1
else:
snake_case__ = model.config.max_length
snake_case__ = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
snake_case__ = encodings['''input_ids''']
snake_case__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ = []
snake_case__ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
snake_case__ = min(start_index + batch_size , len(_a ) )
snake_case__ = encoded_texts[start_index:end_index]
snake_case__ = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
snake_case__ = encoded_batch
with torch.no_grad():
snake_case__ = model(_a , attention_mask=_a ).logits
snake_case__ = out_logits[..., :-1, :].contiguous()
snake_case__ = labels[..., 1:].contiguous()
snake_case__ = attn_mask[..., 1:].contiguous()
snake_case__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
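
# Tiny worked example of the definition this metric implements: perplexity is
# the exponentiated mean per-token negative log-likelihood (numbers are illustrative).
import torch

nll = torch.tensor([2.0, 1.0, 3.0])  # per-token negative log-likelihoods
print(torch.exp(nll.mean()))         # exp(2.0) ~= 7.389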
| 33 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ : Optional[Any] = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
from transformers.testing_utils import pytest_terminal_summary_main
snake_case__ = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__lowerCAmelCase , id=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
snake_case__ = 0
# Doctest custom flag to ignore output.
lowerCamelCase__ : Tuple = doctest.register_optionflag("""IGNORE_RESULT""")
lowerCamelCase__ : Optional[Any] = doctest.OutputChecker
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Optional[Any] , _a:Optional[int] , _a:Union[str, Any] ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , _a , _a , _a )
lowerCamelCase__ : Optional[int] = CustomOutputChecker
lowerCamelCase__ : Dict = HfDoctestModule
lowerCamelCase__ : int = HfDocTestParser
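
# Hedged sketch of how the flag registered above is meant to appear in a
# docstring: with IGNORE_RESULT set, the custom checker accepts any actual output.
def flaky_example():
    """
    >>> print("non-deterministic value")  # doctest: +IGNORE_RESULT
    whatever was printed on the last run
    """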
| 33 |
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase__ : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = Github(os.environ['''GITHUB_TOKEN'''] )
snake_case__ = g.get_repo('''huggingface/diffusers''' )
snake_case__ = repo.get_issues(state='''open''' )
for issue in open_issues:
        snake_case__ = sorted(issue.get_comments() , key=lambda __lowerCAmelCase : __lowerCAmelCase.created_at , reverse=True )
snake_case__ = comments[0] if len(__lowerCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
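
# Illustrative check of the inactivity windows used above: the stale notice
# fires on issues at least 30 days old with more than 23 days since the last
# update, and the close fires once the bot's notice has sat unanswered for
# over 7 further days.
from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=24)
print((datetime.utcnow() - updated_at).days > 23)  # True -> eligible for the notice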
| 33 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : Dict = """▁"""
lowerCamelCase__ : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = BigBirdTokenizer
__lowercase : Dict = BigBirdTokenizerFast
__lowercase : List[Any] = True
__lowercase : List[Any] = True
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().setUp()
snake_case__ = self.tokenizer_class(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = '''<s>'''
snake_case__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_a ) , 10_04 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def SCREAMING_SNAKE_CASE__ ( self:int ):
if not self.test_rust_tokenizer:
return
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_rust_tokenizer()
snake_case__ = '''I was born in 92000, and this is falsé.'''
snake_case__ = tokenizer.tokenize(_a )
snake_case__ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
snake_case__ = tokenizer.encode(_a , add_special_tokens=_a )
snake_case__ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
snake_case__ = self.get_rust_tokenizer()
snake_case__ = tokenizer.encode(_a )
snake_case__ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = BigBirdTokenizer(_a , keep_accents=_a )
snake_case__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [2_85, 46, 10, 1_70, 3_82] , )
snake_case__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case__ = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''Hello World!'''
snake_case__ = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
snake_case__ = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self:Any ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
snake_case__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ = ''' '''.join(_a )
snake_case__ = self.big_tokenizer.encode_plus(_a , return_tensors='''pt''' , return_token_type_ids=_a )
snake_case__ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_a )
snake_case__ = BigBirdConfig(attention_type='''original_full''' )
snake_case__ = BigBirdModel(_a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_a )
model(**_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
snake_case__ = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
# fmt: off
snake_case__ = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
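
# Hedged usage sketch of the tokenizer round trip the tests above pin down
# (checkpoint name taken from the tests):
from transformers import BigBirdTokenizer

tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tok.encode("Hello World!")  # [65, 18536, 2260, 101, 66] per the slow test
print(tok.decode(ids))            # expected to read '[CLS] Hello World![SEP]'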
| 33 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
snake_case__ = _distribute_shards(**__lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = _split_gen_kwargs(__lowerCAmelCase , __lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
if expected is RuntimeError:
with pytest.raises(__lowerCAmelCase ):
_number_of_shards_in_gen_kwargs(__lowerCAmelCase )
else:
snake_case__ = _number_of_shards_in_gen_kwargs(__lowerCAmelCase )
assert out == expected
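
# Minimal reimplementation sketch of the shard-distribution behavior the first
# parametrized test expects (illustrative, not the datasets-internal function):
def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    num_jobs = min(num_shards, max_num_jobs)
    # spread shards as evenly as possible; earlier jobs absorb the remainder
    sizes = [num_shards // num_jobs + (i < num_shards % num_jobs) for i in range(num_jobs)]
    starts = [sum(sizes[:i]) for i in range(num_jobs)]
    return [range(s, s + n) for s, n in zip(starts, sizes)]

# distribute_shards(10, 3) -> [range(0, 4), range(4, 7), range(7, 10)]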
| 33 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = 'camembert'
def __init__( self:str , _a:List[str]=3_05_22 , _a:Tuple=7_68 , _a:str=12 , _a:Optional[int]=12 , _a:Tuple=30_72 , _a:Tuple="gelu" , _a:Any=0.1 , _a:str=0.1 , _a:Union[str, Any]=5_12 , _a:str=2 , _a:Dict=0.02 , _a:Tuple=1e-12 , _a:int=1 , _a:Dict=0 , _a:str=2 , _a:int="absolute" , _a:Tuple=True , _a:Dict=None , **_a:Tuple , ):
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = hidden_act
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = position_embedding_type
snake_case__ = use_cache
snake_case__ = classifier_dropout
class __magic_name__ (snake_case_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
if self.task == "multiple-choice":
snake_case__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
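
# Hedged usage sketch: instantiating the config with the defaults defined above
# (standard transformers API; the printed values assume the defaults in __init__).
from transformers import CamembertConfig

config = CamembertConfig()
print(config.hidden_size)          # 768
print(config.num_attention_heads)  # 12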
| 33 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
snake_case__ = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b"
snake_case__ = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b"
snake_case__ = max(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(__lowerCAmelCase ) , b_binary.zfill(__lowerCAmelCase ) ) )
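
# A minimal standalone illustration of the same idea with readable names
# (the function above keeps this file's obfuscated identifiers):
def binary_and(a: int, b: int) -> str:
    a_bits, b_bits = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bits), len(b_bits))
    return "0b" + "".join(
        str(int(x == "1" and y == "1"))
        for x, y in zip(a_bits.zfill(width), b_bits.zfill(width))
    )

# binary_and(25, 9) -> '0b01001'  (25 & 9 == 9)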
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
import math
class __magic_name__ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ):
        # accumulate the squared distance to each of the two weight rows separately
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ):
for i in range(len(_a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def SCREAMING_SNAKE_CASE ( ) -> None:
# Training Examples ( m, n )
snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case__ = SelfOrganizingMap()
snake_case__ = 3
snake_case__ = 0.5
for _ in range(__lowerCAmelCase ):
for j in range(len(__lowerCAmelCase ) ):
# training sample
snake_case__ = training_samples[j]
# Compute the winning vector
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# Update the winning vector
snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# classify test sample
snake_case__ = [0, 0, 0, 1]
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
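
# Quick numeric check of the winner rule's distance computation (independent of
# the class above): the squared distance of sample [1, 1, 0, 0] to weight row
# [0.2, 0.6, 0.5, 0.9] is 0.64 + 0.16 + 0.25 + 0.81 = 1.86.
print(sum((s - w) ** 2 for s, w in zip([1, 1, 0, 0], [0.2, 0.6, 0.5, 0.9])))  # ~1.86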
| 33 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Any = 'bloom'
__lowercase : str = ['past_key_values']
__lowercase : Optional[Any] = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self:Union[str, Any] , _a:int=25_08_80 , _a:Dict=64 , _a:List[Any]=2 , _a:Optional[int]=8 , _a:int=1e-5 , _a:List[str]=0.02 , _a:Union[str, Any]=True , _a:List[str]=1 , _a:Tuple=2 , _a:Any=False , _a:Optional[Any]=0.0 , _a:int=0.0 , _a:Any=1 , _a:str=False , **_a:Any , ):
snake_case__ = vocab_size
# Backward compatibility with n_embed kwarg
snake_case__ = kwargs.pop('''n_embed''' , _a )
snake_case__ = hidden_size if n_embed is None else n_embed
snake_case__ = n_layer
snake_case__ = n_head
snake_case__ = layer_norm_epsilon
snake_case__ = initializer_range
snake_case__ = use_cache
snake_case__ = pretraining_tp
snake_case__ = apply_residual_connection_post_layernorm
snake_case__ = hidden_dropout
snake_case__ = attention_dropout
snake_case__ = bos_token_id
snake_case__ = eos_token_id
snake_case__ = slow_but_exact
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = version.parse('1.12' )
def __init__( self:Union[str, Any] , _a:PretrainedConfig , _a:str = "default" , _a:List[PatchingSpec] = None , _a:bool = False , ):
super().__init__(_a , task=_a , patching_specs=_a , use_past=_a )
if not getattr(self._config , '''pad_token_id''' , _a ):
# TODO: how to do that better?
snake_case__ = 0
@property
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_a , direction='''inputs''' , inverted_values_shape=_a )
snake_case__ = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
snake_case__ = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE__ ( self:str ):
return 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:"PreTrainedTokenizer" , _a:int = -1 , _a:int = -1 , _a:bool = False , _a:Optional["TensorType"] = None , ):
snake_case__ = super(_a , self ).generate_dummy_inputs(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
# We need to order the input in the way they appears in the forward()
snake_case__ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case__ , snake_case__ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
snake_case__ = seqlen + 2
snake_case__ = self._config.hidden_size // self.num_attention_heads
snake_case__ = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
snake_case__ = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
snake_case__ = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers )
]
snake_case__ = common_inputs['''attention_mask''']
if self.use_past:
snake_case__ = ordered_inputs['''attention_mask'''].dtype
snake_case__ = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_a , _a , dtype=_a )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return 13
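
# Illustrative shape check for the BLOOM past_key_values layout built above:
# keys are (batch * n_head, head_dim, past_len) and values are
# (batch * n_head, past_len, head_dim).
batch, n_head, hidden, past_len = 2, 8, 64, 5
head_dim = hidden // n_head                          # 8
key_shape = (batch * n_head, head_dim, past_len)     # (16, 8, 5)
value_shape = (batch * n_head, past_len, head_dim)   # (16, 5, 8)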
| 33 |
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
snake_case__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i]
snake_case__ = []
snake_case__ = 0
snake_case__ = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
snake_case__ = []
snake_case__ = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case__ = i
total_time += burst_time[target_process]
completed += 1
snake_case__ = 0
snake_case__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7]
lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0]
lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
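
# Expected outcome for the first test case above: with every job arriving at
# t=0 the shortest-remaining-time rule reduces to shortest-job-first, so the
# completion order is P1, P3, P2, P4, giving waiting times [0, 5, 2, 10],
# turnaround times [2, 10, 5, 17], average waiting 4.25 and average turnaround 8.50.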
| 33 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __magic_name__ :
'''simple docstring'''
def __init__( self:str , _a:Optional[Any] , _a:Any=13 , _a:List[str]=7 , _a:str=True , _a:Dict=True , _a:Any=False , _a:Dict=True , _a:int=99 , _a:List[str]=64 , _a:Union[str, Any]=5 , _a:Optional[int]=4 , _a:int=64 , _a:List[str]="gelu" , _a:List[Any]=0.1 , _a:Dict=0.1 , _a:str=5_12 , _a:Optional[int]=16 , _a:Tuple=2 , _a:int=0.02 , _a:Dict=3 , _a:Tuple=4 , _a:Tuple=None , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_input_mask
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_labels
snake_case__ = num_choices
snake_case__ = scope
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = None
if self.use_input_mask:
snake_case__ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ = None
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self:str ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Dict , _a:Dict , _a:Optional[Any] , _a:Dict , _a:Optional[int] , _a:int ):
snake_case__ = MPNetModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , _a )
snake_case__ = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:str , _a:Optional[Any] , _a:Tuple , _a:Tuple , _a:Tuple , _a:Optional[int] ):
snake_case__ = MPNetForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(
_a , attention_mask=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:int , _a:List[Any] , _a:Union[str, Any] , _a:Optional[int] , _a:Tuple , _a:List[str] ):
snake_case__ = self.num_labels
snake_case__ = MPNetForSequenceClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[Any] , _a:List[Any] , _a:Any , _a:int , _a:str , _a:Tuple ):
snake_case__ = self.num_choices
snake_case__ = MPNetForMultipleChoice(config=_a )
model.to(_a )
model.eval()
snake_case__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ = model(
_a , attention_mask=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int , _a:Optional[Any] , _a:Optional[Any] , _a:Union[str, Any] , _a:List[Any] , _a:List[Any] ):
snake_case__ = self.num_labels
snake_case__ = MPNetForTokenClassification(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.prepare_config_and_inputs()
((snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__)) = config_and_inputs
snake_case__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__lowercase : int = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Any = True
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = MPNetModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_a )
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
snake_case__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
snake_case__ = model(_a )[0]
snake_case__ = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _a )
snake_case__ = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 33 |
lowerCamelCase__ : List[str] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
snake_case__ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
snake_case__ = Stack()
snake_case__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__lowerCAmelCase )
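# RULE 3 (implicit): opening parentheses "(" match none of the branches above,
# so they are simply skipped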
elif i == ")":
# RULE 4
snake_case__ = operator_stack.peek()
operator_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operators[opr](__lowerCAmelCase , __lowerCAmelCase )
operand_stack.push(__lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 | 1 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = tempfile.mkdtemp()
snake_case__ = 8
# DPR tok
snake_case__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
snake_case__ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
snake_case__ = os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
snake_case__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
snake_case__ = dict(zip(_a , range(len(_a ) ) ) )
snake_case__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
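# each entry after the "#version" header is a space-separated pair of symbols
# that BPE merges into a single token, e.g. "\u0120 l" -> "\u0120l"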
snake_case__ = {'''unk_token''': '''<unk>'''}
snake_case__ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
snake_case__ = os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ = os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.get_dummy_dataset()
snake_case__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
snake_case__ = dataset
snake_case__ = RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:bool ):
snake_case__ = self.get_dummy_dataset()
snake_case__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
snake_case__ = os.path.join(self.tmpdirname , '''dataset''' )
snake_case__ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
snake_case__ = RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
snake_case__ = RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
snake_case__ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
snake_case__ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
snake_case__ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
snake_case__ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
snake_case__ = RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = 1
snake_case__ = self.get_dummy_canonical_hf_index_retriever()
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ , snake_case__ , snake_case__ = retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
snake_case__ = self.get_dummy_dataset()
retriever.save_pretrained(_a )
snake_case__ = RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ = retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = 1
snake_case__ = self.get_dummy_custom_hf_index_retriever(from_disk=_a )
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ , snake_case__ , snake_case__ = retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
snake_case__ = RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ = retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = 1
snake_case__ = self.get_dummy_custom_hf_index_retriever(from_disk=_a )
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ , snake_case__ , snake_case__ = retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
snake_case__ = RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ = retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = 1
snake_case__ = self.get_dummy_legacy_index_retriever()
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ , snake_case__ , snake_case__ = retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
snake_case__ = RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ = retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
import torch
snake_case__ = 1
snake_case__ = self.get_dummy_canonical_hf_index_retriever()
snake_case__ = [[5, 7], [10, 11]]
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ = retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
snake_case__ , snake_case__ , snake_case__ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
snake_case__ = retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
snake_case__ , snake_case__ , snake_case__ , snake_case__ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_dpr_ctx_encoder_tokenizer()
snake_case__ = 1
snake_case__ = self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
snake_case__ = [[5, 7], [10, 11]]
snake_case__ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case__ = retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary.
| 33 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ):
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any:
snake_case__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str:
snake_case__ , snake_case__ = emb.weight.shape
snake_case__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
snake_case__ = emb.weight.data
return lin_layer
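# design note: the weight assignment above reuses emb.weight.data rather than
# copying it, so the LM head stays tied to the shared embeddings (fairseq-style
# weight tying)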
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
snake_case__ = torch.load(__lowerCAmelCase , map_location='''cpu''' )
snake_case__ = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
snake_case__ = mam_aaa['''model''']
remove_ignore_keys_(__lowerCAmelCase )
snake_case__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
snake_case__ = MaMaaaConfig(
vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
snake_case__ = state_dict['''decoder.embed_tokens.weight''']
snake_case__ = MaMaaaForConditionalGeneration(__lowerCAmelCase )
model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
snake_case__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase__ : Optional[int] = parser.parse_args()
lowerCamelCase__ : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Tuple = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase__ : List[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher values score better but need more memory and run slower; users can adjust)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowerCamelCase__ : int = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.1_5},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
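# hedged illustration (not part of the original script): once a converted model
# is saved with these defaults, generation behaves roughly like
#   model.generate(input_ids, num_beams=5, early_stopping=False, length_penalty=1.1)
# where the length_penalty value comes from the per-model table above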
# this remaps the different models to their organization names
lowerCamelCase__ : List[str] = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ : Optional[int] = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCamelCase__ : int = """allenai"""
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Union[str, Any]:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
snake_case__ = dict((re.sub(r'''@@$''' , '''''' , __lowerCAmelCase ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , __lowerCAmelCase ), v) for k, v in d.items() )
snake_case__ = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
snake_case__ = d[k] # restore
return da
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
# prep
assert os.path.exists(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
snake_case__ = basename(__lowerCAmelCase )
snake_case__ = dirname(__lowerCAmelCase )
snake_case__ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
snake_case__ = cls.hub_models()
snake_case__ = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
snake_case__ = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""" )
snake_case__ = hub_utils.from_pretrained(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , archive_map=__lowerCAmelCase , **__lowerCAmelCase )
snake_case__ = vars(chkpt['''args''']['''model'''] )
snake_case__ = args['''source_lang''']
snake_case__ = args['''target_lang''']
snake_case__ = dirname(__lowerCAmelCase )
snake_case__ = basename(__lowerCAmelCase )
# dicts
snake_case__ = os.path.join(__lowerCAmelCase , F"""dict.{src_lang}.txt""" )
snake_case__ = os.path.join(__lowerCAmelCase , F"""dict.{tgt_lang}.txt""" )
snake_case__ = Dictionary.load(__lowerCAmelCase )
snake_case__ = rewrite_dict_keys(src_dict.indices )
snake_case__ = len(__lowerCAmelCase )
snake_case__ = os.path.join(__lowerCAmelCase , '''vocab-src.json''' )
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
snake_case__ = True
for k in src_vocab.keys():
if not k.islower():
snake_case__ = False
break
snake_case__ = Dictionary.load(__lowerCAmelCase )
snake_case__ = rewrite_dict_keys(tgt_dict.indices )
snake_case__ = len(__lowerCAmelCase )
snake_case__ = os.path.join(__lowerCAmelCase , '''vocab-tgt.json''' )
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# merges_file (bpecodes)
snake_case__ = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['''merges_file'''] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
snake_case__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
break
with open(__lowerCAmelCase , encoding='''utf-8''' ) as fin:
snake_case__ = fin.read()
snake_case__ = re.sub(r''' \d+$''' , '''''' , __lowerCAmelCase , 0 , re.M ) # remove frequency number
print(F"""Generating {merges_file}""" )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fout:
fout.write(__lowerCAmelCase )
# model config
snake_case__ = os.path.join(__lowerCAmelCase , '''config.json''' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args['tokenizer']}"""
snake_case__ = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
snake_case__ = 5
snake_case__ = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
snake_case__ = best_score_hparams[model_dir]['''length_penalty''']
else:
snake_case__ = 1.0
print(F"""Generating {fsmt_model_config_file}""" )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# tokenizer config
snake_case__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 1024,
'''do_lower_case''': do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""" )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# model
snake_case__ = chkpt['''models'''][0]
snake_case__ = model.state_dict()
# rename keys to start with 'model.'
snake_case__ = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
snake_case__ = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = FSMTConfig.from_pretrained(__lowerCAmelCase )
snake_case__ = FSMTForConditionalGeneration(__lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
# save
snake_case__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(F"""cd {data_root}""" )
print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 33 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
snake_case__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = StableDiffusionLatentUpscalePipeline
__lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : List[Any] = frozenset([] )
__lowercase : Any = True
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = 1
snake_case__ = 4
snake_case__ = (16, 16)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
snake_case__ = CLIPTextModel(_a )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
snake_case__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# skip schedulers that operate without sigmas; they are not supported here
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 33 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
lowerCamelCase__ : Optional[int] = """
import os
"""
lowerCamelCase__ : Any = """
def foo():
import os
return False
"""
lowerCamelCase__ : Tuple = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
lowerCamelCase__ : List[str] = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
lowerCamelCase__ : Optional[int] = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
lowerCamelCase__ : Union[str, Any] = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
lowerCamelCase__ : Tuple = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
lowerCamelCase__ : Tuple = """
import os
try:
import bar
except:
raise ValueError()
"""
lowerCamelCase__ : List[str] = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
lowerCamelCase__ : Union[str, Any] = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
lowerCamelCase__ : Optional[Any] = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
snake_case__ = os.path.join(__lowerCAmelCase , '''test_file.py''' )
with open(__lowerCAmelCase , '''w''' ) as _tmp_file:
_tmp_file.write(__lowerCAmelCase )
snake_case__ = get_imports(__lowerCAmelCase )
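# every case above should surface only the unguarded "os" import; imports that
# appear inside a try/except ImportError block are treated as optional and skipped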
assert parsed_imports == ["os"]
| 33 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''ZinengTang/tvlt-base'''
snake_case__ = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = feature_extractor(_a , return_tensors='''np''' )
snake_case__ = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = image_processor(_a , return_tensors='''np''' )
snake_case__ = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 | 1 |
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
snake_case__ = sorted(zip(__lowerCAmelCase , __lowerCAmelCase ) , key=lambda __lowerCAmelCase : x[0] / x[1] , reverse=__lowerCAmelCase )
snake_case__ , snake_case__ = [i[0] for i in r], [i[1] for i in r]
snake_case__ = list(accumulate(__lowerCAmelCase ) )
snake_case__ = bisect(__lowerCAmelCase , __lowerCAmelCase )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
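# hedged example (assumed instance, not from the source): for the classic
# fractional-knapsack input values=[60, 100, 120], weights=[10, 20, 30] and
# capacity 50, the greedy value/weight ordering plus one fractional item yields 240.0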
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = 'data2vec-vision'
def __init__( self:int , _a:Tuple=7_68 , _a:int=12 , _a:Any=12 , _a:Optional[int]=30_72 , _a:Optional[int]="gelu" , _a:Any=0.0 , _a:Any=0.0 , _a:List[str]=0.02 , _a:Dict=1e-12 , _a:Tuple=2_24 , _a:Any=16 , _a:str=3 , _a:str=False , _a:Union[str, Any]=False , _a:Optional[int]=False , _a:Any=False , _a:Dict=0.1 , _a:Dict=0.1 , _a:str=True , _a:str=[3, 5, 7, 11] , _a:List[str]=[1, 2, 3, 6] , _a:List[str]=True , _a:Any=0.4 , _a:str=2_56 , _a:Union[str, Any]=1 , _a:int=False , _a:Optional[int]=2_55 , **_a:Dict , ):
super().__init__(**_a )
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = use_mask_token
snake_case__ = use_absolute_position_embeddings
snake_case__ = use_relative_position_bias
snake_case__ = use_shared_relative_position_bias
snake_case__ = layer_scale_init_value
snake_case__ = drop_path_rate
snake_case__ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ = out_indices
snake_case__ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ = use_auxiliary_head
snake_case__ = auxiliary_loss_weight
snake_case__ = auxiliary_channels
snake_case__ = auxiliary_num_convs
snake_case__ = auxiliary_concat_input
snake_case__ = semantic_loss_ignore_index
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Any = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return 1e-4
| 33 | 1 |
from __future__ import annotations
from typing import Any
class __magic_name__ (snake_case_ ):
'''simple docstring'''
pass
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:Any ):
snake_case__ = data
snake_case__ = None
def __iter__( self:Any ):
snake_case__ = self
snake_case__ = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(_a )
yield node.data
snake_case__ = node.next_node
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
try:
list(self )
return False
except ContainsLoopError:
return True
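# note: collecting visited nodes in a Python list makes this check O(n^2) time and
# O(n) space; Floyd's tortoise-and-hare detection would give O(n) time, O(1) space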
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = Node(1)
lowerCamelCase__ : Union[str, Any] = Node(2)
lowerCamelCase__ : Union[str, Any] = Node(3)
lowerCamelCase__ : Dict = Node(4)
print(root_node.has_loop) # False
lowerCamelCase__ : List[str] = root_node.next_node
print(root_node.has_loop) # True
lowerCamelCase__ : int = Node(5)
lowerCamelCase__ : List[Any] = Node(6)
lowerCamelCase__ : Dict = Node(5)
lowerCamelCase__ : Optional[int] = Node(6)
print(root_node.has_loop) # False
lowerCamelCase__ : Optional[int] = Node(1)
print(root_node.has_loop) # False
| 33 |
import os
import sys
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : Optional[int] = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
return AutoConfig.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoTokenizer.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Tuple:
return AutoModel.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForCausalLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
return AutoModelForMaskedLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForQuestionAnswering.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
| 33 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase__ : Optional[int] = """src/diffusers"""
lowerCamelCase__ : int = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowerCamelCase__ : str = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCamelCase__ : int = spec.loader.load_module()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
return line.startswith(__lowerCAmelCase ) or len(__lowerCAmelCase ) <= 1 or re.search(r'''^\s*\)(\s*->.*:|:)\s*$''' , __lowerCAmelCase ) is not None
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[int]:
snake_case__ = object_name.split('''.''' )
snake_case__ = 0
# First let's find the module where our object lives.
snake_case__ = parts[i]
while i < len(__lowerCAmelCase ) and not os.path.isfile(os.path.join(__lowerCAmelCase , F"""{module}.py""" ) ):
i += 1
if i < len(__lowerCAmelCase ):
snake_case__ = os.path.join(__lowerCAmelCase , parts[i] )
if i >= len(__lowerCAmelCase ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCAmelCase , F"""{module}.py""" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ = f.readlines()
# Now let's find the class / func in the code!
snake_case__ = ''''''
snake_case__ = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCAmelCase ) and re.search(rF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCAmelCase ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
snake_case__ = line_index
while line_index < len(__lowerCAmelCase ) and _should_continue(lines[line_index] , __lowerCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
snake_case__ = lines[start_index:line_index]
return "".join(__lowerCAmelCase )
lowerCamelCase__ : Dict = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowerCamelCase__ : int = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowerCamelCase__ : int = re.compile(r"""<FILL\s+[^>]*>""")
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = code.split('''\n''' )
snake_case__ = 0
while idx < len(__lowerCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCAmelCase ):
return re.search(r'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = len(get_indent(__lowerCAmelCase ) ) > 0
if has_indent:
snake_case__ = F"""class Bla:\n{code}"""
snake_case__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=__lowerCAmelCase )
snake_case__ = black.format_str(__lowerCAmelCase , mode=__lowerCAmelCase )
snake_case__ , snake_case__ = style_docstrings_in_code(__lowerCAmelCase )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> Any:
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ = f.readlines()
snake_case__ = []
snake_case__ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCAmelCase ):
snake_case__ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
snake_case__ , snake_case__ , snake_case__ = search.groups()
snake_case__ = find_code_in_diffusers(__lowerCAmelCase )
snake_case__ = get_indent(__lowerCAmelCase )
snake_case__ = line_index + 1 if indent == theoretical_indent else line_index + 2
snake_case__ = theoretical_indent
snake_case__ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
snake_case__ = True
while line_index < len(__lowerCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCAmelCase ):
break
snake_case__ = lines[line_index]
snake_case__ = _should_continue(__lowerCAmelCase , __lowerCAmelCase ) and re.search(F"""^{indent}# End copy""" , __lowerCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
snake_case__ = lines[start_index:line_index]
snake_case__ = ''''''.join(__lowerCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
snake_case__ = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(__lowerCAmelCase ) is None]
snake_case__ = '''\n'''.join(__lowerCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCAmelCase ) > 0:
snake_case__ = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
snake_case__ = [_re_replace_pattern.search(__lowerCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
snake_case__ , snake_case__ , snake_case__ = pattern.groups()
snake_case__ = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if option.strip() == "all-casing":
snake_case__ = re.sub(obja.lower() , obja.lower() , __lowerCAmelCase )
snake_case__ = re.sub(obja.upper() , obja.upper() , __lowerCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
snake_case__ = blackify(lines[start_index - 1] + theoretical_code )
snake_case__ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
snake_case__ = lines[:start_index] + [theoretical_code] + lines[line_index:]
snake_case__ = start_index + 1
if overwrite and len(__lowerCAmelCase ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__lowerCAmelCase )
return diffs
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = False ) -> str:
snake_case__ = glob.glob(os.path.join(__lowerCAmelCase , '''**/*.py''' ) , recursive=__lowerCAmelCase )
snake_case__ = []
for filename in all_files:
snake_case__ = is_copy_consistent(__lowerCAmelCase , __lowerCAmelCase )
diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCAmelCase ) > 0:
snake_case__ = '''\n'''.join(__lowerCAmelCase )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCamelCase__ : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 33 |
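To make the matching concrete, here is a small sketch of what the `_re_copy_warning` pattern above captures on a representative comment line; the module path is made up for illustration.
import re
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
line = "    # Copied from diffusers.models.unet_2d.UNet2DModel with UNet->Net"
indent, object_name, replace_pattern = _re_copy_warning.search(line).groups()
assert object_name == "models.unet_2d.UNet2DModel"
assert replace_pattern == "with UNet->Net"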
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = (CMStochasticIterativeScheduler,)
__lowercase : List[str] = 10
def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
snake_case__ = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 10
snake_case__ = self.get_scheduler_config()
snake_case__ = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps[0]
snake_case__ = scheduler.timesteps[1]
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = 1
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [1_06, 0]
scheduler.set_timesteps(timesteps=_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 15, 0]
with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 1, 0]
snake_case__ = len(_a )
with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}.""" , ):
scheduler.set_timesteps(timesteps=_a )
| 33 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = """▁"""
lowerCamelCase__ : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCamelCase__ : Optional[int] = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
lowerCamelCase__ : List[Any] = {
"""facebook/xglm-564M""": 2_0_4_8,
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[Any] = ['input_ids', 'attention_mask']
def __init__( self:int , _a:List[Any] , _a:List[str]="<s>" , _a:str="</s>" , _a:int="</s>" , _a:Optional[int]="<s>" , _a:Any="<unk>" , _a:int="<pad>" , _a:Optional[Dict[str, Any]] = None , **_a:int , ):
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case__ = 7
snake_case__ = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case__ = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
snake_case__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
snake_case__ = len(self.sp_model )
snake_case__ = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_a )
snake_case__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self:Optional[int] ):
snake_case__ = self.__dict__.copy()
snake_case__ = None
snake_case__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self:Optional[Any] , _a:Union[str, Any] ):
snake_case__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:List[int] , _a:Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case__ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:List[int] , _a:Optional[List[int]] = None , _a:bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a ))
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a ))
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:str ):
return self.sp_model.encode(_a , out_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:Optional[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Any ):
snake_case__ = ''''''.join(_a ).replace(_a , ''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:str , _a:Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 33 |
import numpy as np
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return vector * sigmoid(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
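A quick numerical check of the two functions above (the swish/SiLU identity `x * sigmoid(x)`), using values that are easy to verify by hand:
import numpy as np
v = np.array([-1.0, 0.0, 1.0])
sig = 1 / (1 + np.exp(-v))   # ~[0.2689, 0.5, 0.7311]
silu = v * sig               # ~[-0.2689, 0.0, 0.7311]; SiLU is exactly 0 at 0
assert np.allclose(silu[1], 0.0)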
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:Any , *_a:Optional[Any] , **_a:Optional[Any] ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
snake_case__ = set()
snake_case__ = 0
snake_case__ = n + 1 # maximum limit
for a in range(2 , __lowerCAmelCase ):
for b in range(2 , __lowerCAmelCase ):
snake_case__ = a**b # calculates the current power
collect_powers.add(__lowerCAmelCase ) # adds the result to the set
return len(__lowerCAmelCase )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 1 |
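A worked check of the distinct-powers count above for a small limit: with 2 <= a, b <= 5 the only collision is 2**4 == 4**2 == 16, leaving 15 distinct terms.
terms = {a**b for a in range(2, 6) for b in range(2, 6)}
assert len(terms) == 15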
lowerCamelCase__ : int = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
snake_case__ = 0
while number:
# Speed is increased slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Two chains are formed:
# one ends with 89, and declaring its member 58 first minimises the number of
# iterations needed to check all the remaining members;
# the other ends with 1 and contains only the single element 1.
# So 58 and 1 are the values seeded at the start.
# A dictionary was replaced by an array to speed up the solution.
lowerCamelCase__ : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Union[str, Any] = False
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
snake_case__ = chain(next_number(__lowerCAmelCase ) )
snake_case__ = number_chain
while number < 1000_0000:
snake_case__ = number_chain
number *= 10
return number_chain
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 1000_0000 ) -> int:
for i in range(1 , __lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 33 |
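A small illustration of the chains the solution above is counting; this helper re-derives the digit-square step locally rather than using the table-based version in the row.
def next_number(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))

chain = [44]
while chain[-1] not in (1, 89):
    chain.append(next_number(chain[-1]))
assert chain == [44, 32, 13, 10, 1]   # 44 reaches 1, so it does not count toward 89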
from copy import deepcopy
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ):
if arr is None and size is not None:
snake_case__ = size
snake_case__ = [0] * size
elif arr is not None:
self.init(_a )
else:
raise ValueError('''Either arr or size must be specified''' )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ):
snake_case__ = len(_a )
snake_case__ = deepcopy(_a )
for i in range(1 , self.size ):
snake_case__ = self.next_(_a )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case__ = self.next_(_a )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ = self.next_(_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
self.add(_a , value - self.get(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
if right == 0:
return 0
snake_case__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ = self.prev(_a )
return result
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
return self.prefix(_a ) - self.prefix(_a )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
return self.query(_a , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
value -= self.tree[0]
if value < 0:
return -1
snake_case__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
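For readers unfamiliar with the structure above, a conventional 1-indexed Fenwick (binary indexed) tree sketch follows; it is a simplification, not the exact layout used in the class, which keeps the element at index 0 in `tree[0]`.
class Fenwick:
    def __init__(self, size: int) -> None:
        self.tree = [0] * (size + 1)      # 1-indexed internal array

    def add(self, i: int, delta: int) -> None:
        i += 1
        while i < len(self.tree):
            self.tree[i] += delta
            i += i & -i                   # climb to the next responsible node

    def prefix(self, i: int) -> int:      # sum of the first i elements
        s = 0
        while i > 0:
            s += self.tree[i]
            i -= i & -i                   # drop the lowest set bit
        return s

f = Fenwick(5)
for idx, v in enumerate([1, 2, 3, 4, 5]):
    f.add(idx, v)
assert f.prefix(3) == 6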
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
if height >= 1:
move_tower(height - 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
move_disk(__lowerCAmelCase , __lowerCAmelCase )
move_tower(height - 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
print('''moving disk from''' , __lowerCAmelCase , '''to''' , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
snake_case__ = int(input('''Height of hanoi: ''' ).strip() )
move_tower(__lowerCAmelCase , '''A''' , '''B''' , '''C''' )
if __name__ == "__main__":
main()
| 33 |
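The recursion above performs 2**n - 1 moves for n disks; here is a self-contained sketch that records the moves instead of printing them, with the argument order following the usual source/auxiliary/target convention.
moves: list[tuple[str, str]] = []

def hanoi(n: int, src: str, aux: str, dst: str) -> None:
    if n >= 1:
        hanoi(n - 1, src, dst, aux)   # park n-1 disks on the auxiliary pole
        moves.append((src, dst))      # move the largest remaining disk
        hanoi(n - 1, aux, src, dst)   # bring the n-1 disks onto it

hanoi(3, "A", "B", "C")
assert len(moves) == 2**3 - 1
assert moves[0] == ("A", "C")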
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __magic_name__ :
'''simple docstring'''
__lowercase : int = BlenderbotConfig
__lowercase : Any = {}
__lowercase : Optional[Any] = 'gelu'
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = eos_token_id
snake_case__ = pad_token_id
snake_case__ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
snake_case__ = inputs_dict['''input_ids''']
snake_case__ = input_ids[:1, :]
snake_case__ = inputs_dict['''attention_mask'''][:1, :]
snake_case__ = inputs_dict['''head_mask''']
snake_case__ = 1
# first forward pass
snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
snake_case__ , snake_case__ = outputs.to_tuple()
# create hypothetical next tokens and extend next_input_ids with them
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ = model(_a , attention_mask=_a )[0]
snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
if attention_mask is None:
snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowercase : Tuple = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowercase : Any = True
__lowercase : int = False
__lowercase : int = False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFBlenderbotModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
__lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
snake_case__ = self.model.generate(
model_inputs.input_ids , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 | 1 |
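The `prepare_blenderbot_inputs_dict` helper above derives the attention mask from the pad token; the same logic in plain NumPy, with made-up ids for illustration:
import numpy as np
pad_token_id = 0
input_ids = np.array([[5, 6, 0, 0]])
attention_mask = (input_ids != pad_token_id).astype(np.int32)
assert attention_mask.tolist() == [[1, 1, 0, 0]]   # pad positions are masked out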
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : int = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = 'data2vec-audio'
def __init__( self:Optional[int] , _a:str=32 , _a:Tuple=7_68 , _a:List[str]=12 , _a:int=12 , _a:List[str]=30_72 , _a:int="gelu" , _a:Dict=0.1 , _a:int=0.1 , _a:int=0.1 , _a:int=0.0 , _a:Optional[Any]=0.1 , _a:List[Any]=0.1 , _a:Union[str, Any]=0.02 , _a:List[str]=1e-5 , _a:Any="gelu" , _a:Tuple=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _a:Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , _a:List[Any]=(10, 3, 3, 3, 3, 2, 2) , _a:Any=False , _a:Dict=16 , _a:Dict=19 , _a:Optional[int]=5 , _a:Union[str, Any]=0.05 , _a:Any=10 , _a:Tuple=2 , _a:Tuple=0.0 , _a:List[str]=10 , _a:int=0 , _a:Tuple="sum" , _a:Any=False , _a:List[Any]=False , _a:str=2_56 , _a:Dict=(5_12, 5_12, 5_12, 5_12, 15_00) , _a:str=(5, 3, 3, 1, 1) , _a:List[str]=(1, 2, 3, 1, 1) , _a:List[Any]=5_12 , _a:Union[str, Any]=0 , _a:Optional[Any]=1 , _a:Tuple=2 , _a:int=False , _a:Any=3 , _a:int=2 , _a:Optional[Any]=3 , _a:Any=None , **_a:Optional[Any] , ):
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
snake_case__ = hidden_size
snake_case__ = feat_extract_activation
snake_case__ = list(_a )
snake_case__ = list(_a )
snake_case__ = list(_a )
snake_case__ = conv_bias
snake_case__ = num_conv_pos_embeddings
snake_case__ = num_conv_pos_embedding_groups
snake_case__ = conv_pos_kernel_size
snake_case__ = len(self.conv_dim )
snake_case__ = num_hidden_layers
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = num_attention_heads
snake_case__ = hidden_dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = feat_proj_dropout
snake_case__ = final_dropout
snake_case__ = layerdrop
snake_case__ = layer_norm_eps
snake_case__ = initializer_range
snake_case__ = vocab_size
snake_case__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ = mask_time_prob
snake_case__ = mask_time_length
snake_case__ = mask_time_min_masks
snake_case__ = mask_feature_prob
snake_case__ = mask_feature_length
snake_case__ = mask_feature_min_masks
# ctc loss
snake_case__ = ctc_loss_reduction
snake_case__ = ctc_zero_infinity
# adapter
snake_case__ = add_adapter
snake_case__ = adapter_kernel_size
snake_case__ = adapter_stride
snake_case__ = num_adapter_layers
snake_case__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ = list(_a )
snake_case__ = list(_a )
snake_case__ = list(_a )
snake_case__ = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return math.prod(self.conv_stride )
| 33 |
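A quick check of the stride-product property at the end of the config above, using the default convolutional strides: the feature extractor downsamples by the product of the strides.
import math
conv_stride = (5, 2, 2, 2, 2, 2, 2)      # the defaults in the config above
assert math.prod(conv_stride) == 320     # one output frame per 320 input samples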
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = 0
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = CLIPConfig()
# Create a dummy config file with image_processor_type
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
snake_case__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
with self.assertRaisesRegex(
_a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use the local one.
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:Optional[int] , _a:Tuple=7 , _a:Dict=3 , _a:Optional[Any]=18 , _a:Optional[Any]=30 , _a:Union[str, Any]=4_00 , _a:str=True , _a:Optional[Any]=32 , _a:Tuple=True , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size_divisor
snake_case__ = do_rescale
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size_divisor''' ) )
self.assertTrue(hasattr(_a , '''resample''' ) )
self.assertTrue(hasattr(_a , '''do_rescale''' ) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 33 |
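The assertions above rely on the output height and width being multiples of `size_divisor`; a sketch of the floor-to-multiple arithmetic that produces them, with a made-up input shape:
size_divisor = 32
h, w = 403, 640
new_h = (h // size_divisor) * size_divisor   # 384: rounded down to a multiple of 32
new_w = (w // size_divisor) * size_divisor   # 640: already a multiple
assert new_h % size_divisor == 0 and new_w % size_divisor == 0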
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = ''''''
else:
snake_case__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = dct.pop(__lowerCAmelCase )
snake_case__ = val
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = ViTConfig()
snake_case__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case__ = True
snake_case__ = int(vit_name[-12:-10] )
snake_case__ = int(vit_name[-9:-6] )
else:
snake_case__ = 1000
snake_case__ = '''huggingface/label-files'''
snake_case__ = '''imagenet-1k-id2label.json'''
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(vit_name[-6:-4] )
snake_case__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif vit_name[9:].startswith('''small''' ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
snake_case__ = 768
snake_case__ = 2304
snake_case__ = 8
snake_case__ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
elif vit_name[4:].startswith('''huge''' ):
snake_case__ = 1280
snake_case__ = 5120
snake_case__ = 32
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ = ViTModel(__lowerCAmelCase ).eval()
else:
snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case__ = DeiTImageProcessor(size=config.image_size )
else:
snake_case__ = ViTImageProcessor(size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ = encoding['''pixel_values''']
snake_case__ = model(__lowerCAmelCase )
if base_model:
snake_case__ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 | 1 |
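The `read_in_q_k_v` step above slices timm's fused qkv projection into separate query/key/value matrices; a minimal tensor sketch of that slicing with a toy hidden size:
import torch
hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)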
import numpy as np
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.array:
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
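The `1.702` constant above is the standard sigmoid approximation to GELU; at x = 1.0 it gives ~0.8458 versus ~0.8413 for the exact `x * Phi(x)`, an error of about 0.005.
import numpy as np
x = np.array([1.0])
approx = x * (1 / (1 + np.exp(-1.702 * x)))
assert abs(approx[0] - 0.8458) < 1e-3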
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = ['image_processor', 'tokenizer']
__lowercase : str = 'AutoImageProcessor'
__lowercase : Dict = 'AutoTokenizer'
def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case__ = kwargs.pop('''feature_extractor''' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
snake_case__ = self.image_processor
snake_case__ = False
def __call__( self:Optional[int] , *_a:str , **_a:int ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
snake_case__ = kwargs.pop('''images''' , _a )
snake_case__ = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case__ = self.image_processor(_a , *_a , **_a )
if text is not None:
snake_case__ = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 33 | 1 |
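The parsing loop above walks XML-ish tag pairs of the form `<s_key>value</s_key>`; a minimal sketch of one extraction step, with a made-up key and value:
import re
tokens = "<s_total>12.00</s_total>"
key = re.search(r"<s_(.*?)>", tokens).group(1)
content = re.search(rf"<s_{key}>(.*?)</s_{key}>", tokens, re.IGNORECASE).group(1)
assert (key, content) == ("total", "12.00")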
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self:int , _a:Optional[int] , _a:List[Any]=7 , _a:Union[str, Any]=3 , _a:List[str]=18 , _a:Any=30 , _a:str=4_00 , _a:Optional[Any]=True , _a:List[Any]=None , _a:Optional[Any]=True , ):
snake_case__ = size if size is not None else {'''height''': 18, '''width''': 18}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = do_normalize
def SCREAMING_SNAKE_CASE__ ( self:int ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = ImageGPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''clusters''' ) )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
snake_case__ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , obj[key] ) )
else:
self.assertEqual(obj[key] , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = os.path.join(_a , '''image_processor.json''' )
image_processor_first.to_json_file(_a )
snake_case__ = self.image_processing_class.from_json_file(_a ).to_dict()
snake_case__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_a )
snake_case__ = self.image_processing_class.from_pretrained(_a ).to_dict()
snake_case__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _a )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
pass
def SCREAMING_SNAKE_CASE ( ) -> Dict:
snake_case__ = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
snake_case__ = Image.open(dataset[4]['''file'''] )
snake_case__ = Image.open(dataset[5]['''file'''] )
snake_case__ = [imagea, imagea]
return images
@require_vision
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
snake_case__ = prepare_images()
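        # ImageGPT color-quantizes pixels to cluster indices, so the processor returns integer input_ids rather than float pixel_values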
# test non-batched
snake_case__ = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
snake_case__ = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _a )
# test batched
snake_case__ = image_processing(_a , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
snake_case__ = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _a )
| 33 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = num_channels
snake_case__ = embeddings_size
snake_case__ = hidden_sizes
snake_case__ = depths
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_act
snake_case__ = num_labels
snake_case__ = scope
snake_case__ = len(_a )
snake_case__ = out_features
snake_case__ = out_indices
snake_case__ = num_groups
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
snake_case__ = BitModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = BitForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowercase : int = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Optional[Any] = False
__lowercase : str = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = BitModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(config=_a )
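            # every normalization layer should start out as an identity transform: weight == 1 and bias == 0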
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ = layer_type
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
__lowercase : int = BitConfig
__lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = BitModelTester(self )
| 33 | 1 |
def hamming(n_element: int ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('''a should be a positive number''' )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
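    # advance each pointer past multiples already in the list, then append the smallest new candidate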
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 33 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case__ = '''cuda'''
else:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case__ = AutoModelForCausalLM.from_pretrained(_a )
snake_case__ = model.to(_a )
snake_case__ = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ = model.config.max_length - 1
else:
snake_case__ = model.config.max_length
snake_case__ = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
snake_case__ = encodings['''input_ids''']
snake_case__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
snake_case__ = min(start_index + batch_size , len(_a ) )
snake_case__ = encoded_texts[start_index:end_index]
snake_case__ = attn_masks[start_index:end_index]
if add_start_token:
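                # prepend a BOS token to every sequence in the batch and extend the attention mask to cover it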
snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_a ), attn_mask] , dim=1 )
snake_case__ = encoded_batch
with torch.no_grad():
snake_case__ = model(_a , attention_mask=_a ).logits
snake_case__ = out_logits[..., :-1, :].contiguous()
snake_case__ = labels[..., 1:].contiguous()
snake_case__ = attn_mask[..., 1:].contiguous()
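            # per-sequence perplexity: exp of the mean token-level cross-entropy over non-padded positions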
            perplexity_batch = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_efficientformer""": [
        """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """EfficientFormerConfig""",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_efficientformer"""] = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_efficientformer"""] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_efficientformer"""] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 |
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase__ : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        # comments sorted newest-first, so comments[0] is the latest one
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
    """tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_roberta_fast"""] = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta"""] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta"""] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta"""] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
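# e.g. _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]:
# shards are partitioned into contiguous, near-equal ranges, one per job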
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
assert out == expected
| 33 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
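        # keep only the cache entries whose parent directory is exactly the queried path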
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 0 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 | 0 |
from __future__ import annotations
from collections.abc import Callable
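# Project Euler 101 ("Optimum polynomials"): given u(n) = 1 - n + n^2 - ... + n^10, fit a
# polynomial through the first k terms for each k and sum each fit's first incorrect term.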
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    """
    Solve the linear system matrix . x = vector by Gaussian elimination with
    partial pivoting and return the solution column x.
    """
    size = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_points: list[int] ) -> Callable[[int], int]:
    """
    Return the unique polynomial of degree len(y_points) - 1 that passes
    through the points (1, y_points[0]), (2, y_points[1]), ...
    """
    size = len(y_points )
    # Vandermonde system: matrix . coeffs = vector
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
    """
    The generating function u(n) = 1 - n + n^2 - n^3 + ... + n^10.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    """
    Fit a polynomial through the first k terms of func for k = 1..order and
    sum the first incorrect term of each fit.
    """
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 |
import math
class SelfOrganizingMap:
    '''
    A minimal Kohonen self-organizing map with two competing weight vectors.
    '''
    def get_winner(self , weights: list[list[float]] , sample: list[int] ):
        """Compute the winning vector by Euclidean distance."""
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        # the weight vector with the smaller squared distance wins
        return 0 if da < db else 1
    def update(self , weights: list[list[int | float]] , sample: list[int] , j: int , alpha: float ):
        """Pull every component of winning vector j towards the sample by learning rate alpha."""
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""" )
    print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 33 | 0 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
UpperCAmelCase_ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
UpperCAmelCase_ = """zero2"""
UpperCAmelCase_ = """zero3"""
UpperCAmelCase_ = [ZEROa, ZEROa]
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :Dict , _snake_case :Union[str, Any] ) -> Tuple:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
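# i.e. ("zero2", "base"), ("zero2", "robust"), ("zero3", "base"), ("zero3", "robust")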
@slow
@require_deepspeed
@require_torch_gpu
class lowerCamelCase__ ( _A):
"""simple docstring"""
@parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Dict:
self.run_and_check(
stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase )
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ) -> Dict:
self.run_and_check(
stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
@parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase )
def snake_case_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> str:
self.run_and_check(
stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
self.run_and_check(
stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ) -> int:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int = 10 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , ) -> Any:
_A = models[model]
_A = self.run_trainer(
stage=__lowerCAmelCase , model_name=__lowerCAmelCase , eval_steps=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
self.do_checks(__lowerCAmelCase )
return output_dir
def snake_case_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int = 10 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , ) -> Any:
_A = self.get_auto_remove_tmp_dir('''./xxx''' , after=__lowerCAmelCase )
_A = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_A = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_A = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_A = self.get_launcher(__lowerCAmelCase )
_A = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
return output_dir
def snake_case_ ( self : List[str] , __lowerCAmelCase : Dict=False ) -> Tuple:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
_A = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 2 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
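# Note: the scheduler above is non-preemptive; once selected, a process runs its full burst.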
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
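# Project Euler 174: count how many tile totals t <= 1_000_000 can form between 1 and 10
# distinct hollow square laminae (tiles used = outer_width^2 - hole_width^2).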
def solution(t_limit: int = 100_0000 , n_limit: int = 10 ) -> int:
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 3 |
lowerCamelCase__ : List[str] = """Alexander Joslin"""
import operator as op
from .stack import Stack
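# Dijkstra's two-stack algorithm evaluates a fully parenthesized infix expression in a single
# left-to-right pass, pushing operands and operators onto separate stacks.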
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : list ):
_enforce_args(_UpperCAmelCase , _UpperCAmelCase )
if n == 0:
return 0
lowerCAmelCase = float('-inf' )
for i in range(1 , n + 1 ):
lowerCAmelCase = max(
_UpperCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , _UpperCAmelCase ) )
return max_revue
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : list ):
_enforce_args(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : list , _UpperCAmelCase : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowerCAmelCase = float('-inf' )
for i in range(1 , n + 1 ):
lowerCAmelCase = max(
_UpperCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , _UpperCAmelCase , _UpperCAmelCase ) , )
lowerCAmelCase = max_revenue
return max_rev[n]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : list ):
_enforce_args(_UpperCAmelCase , _UpperCAmelCase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
lowerCAmelCase = [float('-inf' ) for _ in range(n + 1 )]
lowerCAmelCase = 0
for i in range(1 , n + 1 ):
lowerCAmelCase = max_rev[i]
for j in range(1 , i + 1 ):
lowerCAmelCase = max(_UpperCAmelCase , prices[j - 1] + max_rev[i - j] )
lowerCAmelCase = max_revenue_i
return max_rev[n]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : list ):
if n < 0:
lowerCAmelCase = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(_UpperCAmelCase )
if n > len(_UpperCAmelCase ):
lowerCAmelCase = (
'Each integral piece of rod must have a corresponding price. '
F'Got n = {n} but length of prices = {len(_UpperCAmelCase )}'
)
raise ValueError(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = [6, 10, 12, 15, 20, 23]
lowerCAmelCase = len(_UpperCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowerCAmelCase = 36
lowerCAmelCase = top_down_cut_rod(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = bottom_up_cut_rod(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = naive_cut_rod_recursive(_UpperCAmelCase , _UpperCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 4 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ):
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , )
super().__init__(*_a , **_a )
| 33 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
    """tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlm"""] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlm"""] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
    """tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_roberta_fast"""] = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta"""] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta"""] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta"""] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self ):
        """simple docstring"""
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MegatronBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_megatron_bert_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MegatronBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_megatron_bert_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MegatronBertForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_megatron_bert_for_next_sequence_prediction(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MegatronBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def create_and_check_megatron_bert_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MegatronBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def create_and_check_megatron_bert_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MegatronBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_megatron_bert_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_megatron_bert_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_megatron_bert_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
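# Hedged sketch (not in the original test file): the tester above can also be
# driven outside the unittest runner; `parent` only needs to provide the
# assert* methods, so a bare TestCase instance is enough.
#
#     tester = MegatronBertModelTester(parent=unittest.TestCase())
#     config_and_inputs = tester.prepare_config_and_inputs()
#     tester.create_and_check_megatron_bert_model(*config_and_inputs)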
@require_torch
class MegatronBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = True
# test_resize_embeddings = False
lowerCamelCase_ = False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        """simple docstring"""
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )

    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )

    def test_for_masked_lm(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )

    def test_for_question_answering(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""" )
    def test_inference_no_head(self ):
        """simple docstring"""
        directory = """nvidia/megatron-bert-uncased-345m"""
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["""MYDIR"""] , directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = """ii={} jj={} a={} b={}""".format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
| 6 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list ) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
__lowercase : Any = True
@property
    def dummy_image(self ):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
            '''KDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
            '''KCrossAttnDownBlock2D''',
            ) , in_channels=8 , mid_block_type=None , only_cross_attention=True , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
            '''DownEncoderBlock2D''',
            ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type='''sample''' )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': self.dummy_image.cpu(),
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference(self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_attention_slicing_forward_pass(self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )

    def test_cpu_offload_forward_pass(self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )

    def test_dict_tuple_outputs_equivalent(self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )

    def test_pt_np_pil_outputs_equivalent(self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )

    def test_save_load_local(self ):
        super().test_save_load_local(expected_max_difference=3e-3 )

    def test_save_load_optional_components(self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_karras_schedulers_shape(self ):
        skip_schedulers = [
            '''DDIMScheduler''',
            '''DDPMScheduler''',
            '''PNDMScheduler''',
            '''HeunDiscreteScheduler''',
            '''EulerAncestralDiscreteScheduler''',
            '''KDPM2DiscreteScheduler''',
            '''KDPM2AncestralDiscreteScheduler''',
            '''DPMSolverSDEScheduler''',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported by this pipeline, so skip them
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self ):
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.float16 )
        pipe.to('''cuda''' )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.float16 )
        upscaler.to('''cuda''' )
        prompt = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        low_res_latents = pipe(prompt , generator=generator , output_type='''latent''' ).images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='''np''' , ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
        assert np.abs((expected_image - image).mean() ) < 5e-2
    def test_latent_upscaler_fp16_image(self ):
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.float16 )
        upscaler.to('''cuda''' )
        prompt = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
        upscaled_image = upscaler(
            prompt=prompt , image=image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='''np''' , ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
        assert np.abs((expected_image - upscaled_image).max() ) < 5e-2
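# Design note with a hedged recipe distilled from the two slow tests above: the
# upscaler consumes the base pipeline's *latents* (output_type="latent"), so the
# low-resolution latent grid is upscaled in VAE space and decoded only once:
#
#     low_res_latents = pipe(prompt, output_type='''latent''').images
#     image = upscaler(prompt=prompt, image=low_res_latents,
#                      num_inference_steps=20, guidance_scale=0).images[0]
#
# Passing decoded pixels instead would force an extra encode/decode round trip.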
| 33 | 0 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
    '''simple docstring'''

    def _check_no_duplicates_on_constructed_node(self , node ):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )

    def construct_mapping(self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme(readme_content: str ) -> Tuple[Optional[str], str]:
    '''simple docstring'''
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---' ) + 1
        yamlblock = '\n'.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata(dict ):
    '''simple docstring'''
    # class attributes
    _FIELDS_WITH_DASHES = {'''train_eval_index'''}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme(cls , path: Path ):
        with open(path , encoding='utf-8' ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme(self , path: Path ):
        if path.exists():
            with open(path , encoding='utf-8' ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , 'w' , encoding='utf-8' ) as readme_file:
            readme_file.write(full_content )
    def _to_readme(self , readme_content: Optional[str] = None ) -> str:
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content
    @classmethod
    def from_yaml_string(cls , string: str ):
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string(self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding='utf-8' , ).decode('utf-8' )
a = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
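# For reference, the README.md shape this script validates (field values are
# illustrative, not from the original file):
#
#     ---
#     language:
#     - en
#     task_categories:
#     - text-classification
#     ---
#     # My dataset card
#
# _split_yaml_from_readme peels off the block between the two `---` lines, and
# DatasetMetadata.from_yaml_string parses it, rejecting duplicate keys through
# _NoDuplicateSafeLoader.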
| 7 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )

    def get_feature_extractor(self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )

    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_20_00] )
        audio_dict = feature_extractor(audio , return_tensors='''np''' )
        input_processor = processor(audio=audio , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 2_24, 2_24] )
        image_dict = image_processor(images , return_tensors='''np''' )
        input_processor = processor(images=images , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_20_00] )
        images = np.ones([3, 2_24, 2_24] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
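# Hedged usage sketch mirroring the assertions above (array shapes as in the
# tests; everything else illustrative):
#
#     processor = TvltProcessor(image_processor=TvltImageProcessor(),
#                               feature_extractor=TvltFeatureExtractor())
#     inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
#     # -> dict with audio_values, audio_mask, pixel_values, pixel_mask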
| 33 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 8 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'data2vec-vision'

    def __init__(
        self ,
        hidden_size=7_68 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=30_72 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        image_size=2_24 ,
        patch_size=16 ,
        num_channels=3 ,
        use_mask_token=False ,
        use_absolute_position_embeddings=False ,
        use_relative_position_bias=False ,
        use_shared_relative_position_bias=False ,
        layer_scale_init_value=0.1 ,
        drop_path_rate=0.1 ,
        use_mean_pooling=True ,
        out_indices=[3, 5, 7, 11] ,
        pool_scales=[1, 2, 3, 6] ,
        use_auxiliary_head=True ,
        auxiliary_loss_weight=0.4 ,
        auxiliary_channels=2_56 ,
        auxiliary_num_convs=1 ,
        auxiliary_concat_input=False ,
        semantic_loss_ignore_index=2_55 ,
        **kwargs ,
    ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs(self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self ):
return 1e-4
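# Hedged usage sketch (not from the original file): the ONNX config above
# declares the axes the exporter needs for the single pixel_values input.
#
#     config = Data2VecVisionConfig(image_size=224, patch_size=16)
#     onnx_config = Data2VecVisionOnnxConfig(config)
#     list(onnx_config.inputs)          # ['pixel_values']
#     onnx_config.atol_for_validation   # 1e-4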
| 33 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def make_batched(videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
class VivitImageProcessor(BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__(
        self ,
        do_resize: bool = True ,
        size: Dict[str, int] = None ,
        resample: PILImageResampling = PILImageResampling.BILINEAR ,
        do_center_crop: bool = True ,
        crop_size: Dict[str, int] = None ,
        do_rescale: bool = True ,
        rescale_factor: Union[int, float] = 1 / 2_55 ,
        offset: bool = True ,
        do_normalize: bool = True ,
        image_mean: Optional[Union[float, List[float]]] = None ,
        image_std: Optional[Union[float, List[float]]] = None ,
        **kwargs ,
    ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size['shortest_edge'] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f'''Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have 'height' and 'width' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale(self , image: np.ndarray , scale: Union[int, float] , offset: bool = True , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """simple docstring"""
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image(self , image: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , offset: bool = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        """simple docstring"""
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess(self , videos: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , offset: bool = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        if not valid_images(videos ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
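# Hedged usage sketch (not from the original file; the class name follows the
# Vivit-style video processor this code matches). A "video" is a list of
# frames, and make_batched wraps it into a batch of one video:
#
#     processor = VivitImageProcessor(size={'shortest_edge': 2_56})
#     video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#     out = processor(video, return_tensors='np')
#     out['pixel_values'].shape   # (1, 8, 3, 224, 224)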
| 9 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
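# Hedged torch.hub usage sketch (entry-point names follow the functions above;
# the checkpoint id is illustrative):
#
#     import torch
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")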
| 33 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig ):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self ,
        backbone_config=None ,
        num_queries=900 ,
        max_position_embeddings=2048 ,
        encoder_layers=6 ,
        encoder_ffn_dim=2048 ,
        encoder_attention_heads=8 ,
        decoder_layers=6 ,
        decoder_ffn_dim=1024 ,
        decoder_attention_heads=8 ,
        encoder_layerdrop=0.0 ,
        is_encoder_decoder=True ,
        activation_function="relu" ,
        d_model=256 ,
        dropout=0.1 ,
        attention_dropout=0.0 ,
        activation_dropout=0.0 ,
        init_std=0.02 ,
        init_xavier_std=1.0 ,
        return_intermediate=True ,
        auxiliary_loss=False ,
        position_embedding_type="sine" ,
        num_feature_levels=5 ,
        encoder_n_points=4 ,
        decoder_n_points=4 ,
        two_stage=True ,
        two_stage_num_proposals=300 ,
        with_box_refine=True ,
        assign_first_stage=True ,
        class_cost=1 ,
        bbox_cost=5 ,
        giou_cost=2 ,
        mask_loss_coefficient=1 ,
        dice_loss_coefficient=1 ,
        bbox_loss_coefficient=5 ,
        giou_loss_coefficient=2 ,
        eos_coefficient=0.1 ,
        focal_alpha=0.25 ,
        **kwargs ,
    ):
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def num_attention_heads(self ):
return self.encoder_attention_heads
@property
    def hidden_size(self ):
return self.d_model
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
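# Hedged usage sketch (not from the original file): building the config with a
# dict backbone and round-tripping it.
#
#     config = DetaConfig(backbone_config={"model_type": "resnet",
#                                          "out_features": ["stage2", "stage3", "stage4"]})
#     d = config.to_dict()
#     d["backbone_config"]["model_type"]   # "resnet"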
| 10 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self , **kwargs ):
        config = {
            '''num_train_timesteps''': 2_01,
            '''sigma_min''': 0.002,
            '''sigma_max''': 80.0,
        }
        config.update(**kwargs )
        return config
    def test_step_shape(self ):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps(self ):
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_clip_denoised(self ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
    def test_full_loop_no_noise_onestep(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3
    def test_full_loop_no_noise_multistep(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_06, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3
    def test_custom_timesteps_increasing_order(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
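# Hedged sampling sketch distilled from the loops above (`model` stands for any
# denoiser with the (sample, t) signature used by the dummy model):
#
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#     scheduler.set_timesteps(timesteps=[106, 0])      # explicit, descending
#     sample = init_noise * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)
#         sample = scheduler.step(model(scaled, t), t, sample).prev_sample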
| 33 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class __A ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def resolver(self ):
        """simple docstring"""
        _a = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=_a )
@slow
    def test_resolver(self ):
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
    def test_model_card(self ):
        """simple docstring"""
        content , mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 11 |
import numpy as np
def sigmoid(vector: np.ndarray ) -> np.ndarray:
    return 1 / (1 + np.exp(-vector ))


def sigmoid_linear_unit(vector: np.ndarray ) -> np.ndarray:
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
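if __name__ == "__main__":
    # Hedged sanity check (added for illustration): sigmoid squashes into (0, 1)
    # and the SiLU is x * sigmoid(x), so it is negative for negative inputs.
    print(sigmoid(np.array([-1.0, 1.0, 2.0])))              # ~[0.2689, 0.7311, 0.8808]
    print(sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])))  # ~[-0.2689, 0.7311, 1.7616]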
| 33 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self ):
        '''simple docstring'''
        self.connections = {}

    def add_node(self , node: str ):
        '''simple docstring'''
        self.connections[node] = {}

    def add_transition_probability(self , node1: str , node2: str , probability: float ):
        '''simple docstring'''
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability

    def get_nodes(self ):
        '''simple docstring'''
        return list(self.connections )

    def transition(self , node: str ) -> str:
        '''simple docstring'''
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str , transitions: list[tuple[str, str, float]] , steps: int ) -> dict[str, int]:
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
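if __name__ == "__main__":
    # Hedged example (added for illustration): a two-state weather chain. With
    # these probabilities the stationary distribution is 5/8 sunny, 3/8 rainy,
    # so the visit counts should approach that ratio for large step counts.
    example_transitions = [
        ("sunny", "rainy", 0.3),
        ("sunny", "sunny", 0.7),
        ("rainy", "sunny", 0.5),
        ("rainy", "rainy", 0.5),
    ]
    print(get_transitions("sunny", example_transitions, 1000))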
| 12 |
def solution(n: int = 100) -> int:
    """Count the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n (Project Euler 29)."""
    collect_powers = set()
    upper_bound = n + 1  # maximum limit (range() excludes its stop value)
    for a in range(2, upper_bound):
        for b in range(2, upper_bound):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
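# Worked example from the problem statement: solution(5) == 15, since
# a**b for 2 <= a, b <= 5 yields 15 distinct values (4, 8, 16, 32, 9, 27, ...).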
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor of the given 2-D shape as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        # the mel filter banks are float arrays, so compare them separately
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 13 |
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix-sum queries."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        # O(n) construction: push each value up to the next responsible index
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        # invert init() to recover the underlying array
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to the element at `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value`."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the elements in [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of the elements in [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index whose prefix sum does not exceed `value` (requires non-negative elements)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
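# Example: point updates and range sums on [1, 2, 3, 4].
#   f = FenwickTree([1, 2, 3, 4])
#   f.prefix(3)    # 1 + 2 + 3 -> 6
#   f.add(1, 10)   # array becomes [1, 12, 3, 4]
#   f.query(1, 3)  # 12 + 3 -> 15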
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    """Kruskal's algorithm should return the expected minimum spanning tree edges."""
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
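    # the expected MST has total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37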
    assert sorted(expected) == sorted(result)
| 14 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 33 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 15 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 16 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
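# e.g. timm's "blocks.0.attn.proj.weight" maps to "vit.encoder.layer.0.attention.output.dense.weight"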
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
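# e.g. for hidden_size 768 the fused (2304, 768) qkv weight splits into three (768, 768)
# blocks for query, key and value, in that order; the biases split the same way.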
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT checkpoint into the Hugging Face ViT structure."""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
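# At import time only `_import_structure` is read; the torch-dependent modules load
# lazily on first attribute access via the `_LazyModule` registered in `sys.modules`.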
| 17 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into a (possibly nested) JSON-like dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
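    # Example (sketch): token2json("<s_menu><s_name>latte</s_name></s_menu>")
    # returns {"menu": {"name": "latte"}}.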
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 33 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_SCREAMING_SNAKE_CASE = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_SCREAMING_SNAKE_CASE = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_SCREAMING_SNAKE_CASE = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 18 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
snake_case__ = BitModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = BitForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason='''Bit does not output attentions''' )
    def test_attention_outputs( self ):
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
        pass
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest (unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class BitBackboneTest (BackboneTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp( self ):
        self.model_tester = BitModelTester(self )
| 33 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """spm_file""": """sentencepiece.bpe.model""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/s2t-small-librispeech-asr""": (
            """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
        ),
    },
    """spm_file""": {
        """facebook/s2t-small-librispeech-asr""": (
            """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    """facebook/s2t-small-librispeech-asr""": 1024,
}

MUSTC_LANGS = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
LANGUAGES = {"""mustc""": MUSTC_LANGS}


class _UpperCAmelCase( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
def __init__( self , __a , __a , __a="<s>" , __a="</s>" , __a="<pad>" , __a="<unk>" , __a=False , __a=False , __a=None , __a=None , __a = None , **__a , ) -> None:
'''simple docstring'''
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_UpperCamelCase = do_upper_case
_UpperCamelCase = do_lower_case
_UpperCamelCase = load_json(__a)
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
_UpperCamelCase = spm_file
_UpperCamelCase = load_spm(__a , self.sp_model_kwargs)
if lang_codes is not None:
_UpperCamelCase = lang_codes
_UpperCamelCase = LANGUAGES[lang_codes]
_UpperCamelCase = [F'''<lang:{lang}>''' for lang in self.langs]
_UpperCamelCase = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''') for lang in self.langs}
_UpperCamelCase = self.lang_tokens
_UpperCamelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang)
else:
_UpperCamelCase = {}
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return len(self.encoder)
@property
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
_UpperCamelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(__a)
def UpperCAmelCase ( self , __a) -> None:
'''simple docstring'''
_UpperCamelCase = self.lang_code_to_id[tgt_lang]
_UpperCamelCase = [lang_code_id]
def UpperCAmelCase ( self , __a) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__a , out_type=__a)
def UpperCAmelCase ( self , __a) -> Any:
'''simple docstring'''
return self.encoder.get(__a , self.encoder[self.unk_token])
def UpperCAmelCase ( self , __a) -> str:
'''simple docstring'''
return self.decoder.get(__a , self.unk_token)
def UpperCAmelCase ( self , __a) -> str:
'''simple docstring'''
_UpperCamelCase = []
_UpperCamelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_UpperCamelCase = self.sp_model.decode(__a)
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_UpperCamelCase = []
else:
current_sub_tokens.append(__a)
_UpperCamelCase = self.sp_model.decode(__a)
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCAmelCase ( self , __a , __a=None) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , __a , __a = None , __a = False) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a)
_UpperCamelCase = [1] * len(self.prefix_tokens)
_UpperCamelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a)) + suffix_ones
return prefix_ones + ([0] * len(__a)) + ([0] * len(__a)) + suffix_ones
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self , __a) -> None:
'''simple docstring'''
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
_UpperCamelCase = {}
_UpperCamelCase = load_spm(self.spm_file , self.sp_model_kwargs)
def UpperCAmelCase ( self , __a , __a = None) -> Tuple[str]:
'''simple docstring'''
_UpperCamelCase = Path(__a)
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
_UpperCamelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_UpperCamelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __a)
if os.path.abspath(self.spm_file) != os.path.abspath(__a) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , __a)
elif not os.path.isfile(self.spm_file):
with open(__a , '''wb''') as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__a)
return (str(__a), str(__a))
def load_spm(path , sp_model_kwargs ) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json(path ) -> Union[Dict, List]:
    """simple docstring"""
    with open(path , '''r''' ) as f:
        return json.load(f )


def save_json(data , path ) -> None:
    """simple docstring"""
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
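
# Usage sketch (editor's note; hedged -- it assumes the class above is wired into the
# library under its upstream export name, Speech2TextTokenizer):
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("hello world").input_ids   # token ids, ending with tok.eos_token_id
#   text = tok.decode(ids)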
| 19 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Perplexity (datasets.Metric ):
    '''simple docstring'''

    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
    def _compute( self , input_texts , model_id , batch_size: int = 16 , add_start_token: bool = True , device=None ):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = '''cuda'''
        else:
            device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors='''pt''' , return_attention_mask=True , ).to(device )
        encoded_texts = encodings['''input_ids''']
        attn_masks = encodings['''attention_mask''']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='''none''' )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            # perplexity is the exponential of the mean per-token negative log-likelihood
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 33 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path , pytorch_dump_folder_path ):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location='cpu' )
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(config , indent=2 ) + '\n' )
    print(f"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(vocab , indent=2 ) + '\n' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
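
# Example invocation (editor's sketch; the file names below are hypothetical):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./converted-xlm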
| 20 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 33 | 0 |
def binomial_coefficient(n , k ):
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count ):
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)


def factorial(n ):
    if n < 0:
        raise ValueError("""factorial() not defined for negative values""" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result


def binary_tree_count(node_count ):
    return catalan_number(node_count ) * factorial(node_count )


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
        F"""binary trees and {catalan_number(node_count)} binary search trees."""
    )
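
# Worked check (editor's sketch) of the identities used above:
# catalan_number(n) = C(2n, n) // (n + 1), and binary_tree_count(n) = catalan_number(n) * n!.
# For n = 3: C(6, 3) = 20, so catalan_number(3) = 20 // 4 = 5 binary search trees,
# and binary_tree_count(3) = 5 * 3! = 30 distinct labelled binary trees.
assert binomial_coefficient(6, 3) == 20
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30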
| 21 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
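
# Editor's note on the split rule exercised above: _distribute_shards hands out shards
# as evenly as possible, with the first jobs taking one extra shard when the division
# is uneven. For example, 10 shards over 3 jobs gives sizes 4, 3, 3, i.e.
# [range(0, 4), range(4, 7), range(7, 10)] -- exactly the parametrize expectation.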
| 33 | 0 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input , model , tokenizer , topk=5 ):
    '''simple docstring'''
    assert masked_input.count('''<mask>''' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
        predicted_token = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
_snake_case : Optional[Any] = CamembertTokenizer.from_pretrained('camembert-base')
_snake_case : str = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
_snake_case : str = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 22 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests (PipelineTesterMixin ,IFPipelineTesterMixin ,unittest.TestCase ):
    '''simple docstring'''

    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """allenai/led-base-16384""": 1_6_3_8_4,
}


class _a ( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ) -> Tuple:
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
UpperCamelCase_ = getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) )
UpperCamelCase_ = add_prefix_space
UpperCamelCase_ = pre_tok_class(**_UpperCAmelCase )
UpperCamelCase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase_ = 'post_processor'
UpperCamelCase_ = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
UpperCamelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase_ = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase_ = tuple(state['cls'] )
UpperCamelCase_ = False
if state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
UpperCamelCase_ = add_prefix_space
UpperCamelCase_ = True
if state.get('trim_offsets' , _UpperCAmelCase ) != trim_offsets:
UpperCamelCase_ = trim_offsets
UpperCamelCase_ = True
if changes_to_apply:
UpperCamelCase_ = getattr(_UpperCAmelCase , state.pop('type' ) )
UpperCamelCase_ = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else value
UpperCamelCase_ = value
def _UpperCAmelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> BatchEncoding:
UpperCamelCase_ = kwargs.get('is_split_into_words' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> BatchEncoding:
UpperCamelCase_ = kwargs.get('is_split_into_words' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
UpperCamelCase_ = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Optional[Any]:
UpperCamelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , _UpperCAmelCase = None , _UpperCAmelCase = None , ) -> dict:
UpperCamelCase_ = super()._pad(
encoded_inputs=_UpperCAmelCase , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
UpperCamelCase_ = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCamelCase_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCamelCase_ = len(encoded_inputs['global_attention_mask'] ) != len(_UpperCAmelCase )
if needs_to_be_padded:
UpperCamelCase_ = len(_UpperCAmelCase ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCamelCase_ = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
UpperCamelCase_ = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
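
# Editor's note on the -1 padding in _pad above: for LED a 0 in global_attention_mask
# already means "local attention", so padded positions are marked with -1 to stay
# distinguishable from real local-attention tokens. Illustrative sketch:
#   tokens:                 t0   t1   t2   <pad>
#   global_attention_mask:   1    0    0     -1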
| 23 |
from __future__ import annotations

import math


class SelfOrganizingMap :
    '''simple docstring'''

    def get_winner( self , weights:list[list[float]] , sample:list[int] ):
        """Return the index (0 or 1) of the weight vector closest to the sample."""
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        # the winner is the cluster with the smaller squared distance
        return 1 if da > db else 0

    def update( self , weights:list[list[int | float]] , sample:list[int] , j:int , alpha:float ):
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""" )
    print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
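
# Editor's note on the update rule: each component of the winning weight vector moves
# toward the sample by a fraction alpha, i.e. w[j][i] += alpha * (sample[i] - w[j][i]).
# One worked step: w = 0.2, x = 1, alpha = 0.5 gives w' = 0.2 + 0.5 * (1 - 0.2) = 0.6.
_demo_weights = SelfOrganizingMap().update([[0.2], [0.8]], [1], 0, 0.5)
assert abs(_demo_weights[0][0] - 0.6) < 1e-9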
| 33 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
        return config

    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )

    def prepare_config_and_inputs_for_common( self ):
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp( self ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip('''Does not support attention outputs''' )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold only has one output format.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest ( TestCasePlus):
    @slow
    def test_inference_protein_folding( self ):
        model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['''positions''']
        expected_slice = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.float64 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1E-4 ) )
| 24 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time , burst_time , no_of_processes ) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time with the burst times.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time , no_of_processes , waiting_time ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7]
lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0]
lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = 'swin2sr'
    attribute_map = {
        'hidden_size': 'embed_dim',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 25 |
__author__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1 , num2 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 | 0 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=True )
def load_models():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
        qar_model = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
        save_dict = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
        sas_model.load_state_dict(save_dict["""model"""] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_sas_model(
            model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
        wiki40b_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps )  # TODO fix for larger GPU
    else:
        wiki40b_passages , wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    """simple docstring"""
    eli5 = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
    eli5_train = eli5["""train_eli5"""]
    eli5_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples whose questions are closest in embedding space."""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one answer for the formatted `question_doc` with the seq2seq model."""
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None,
            max_input_length=1024, device="cuda:0", )[0]
    return (answer, support_list)  # support_list is the module-level list built at retrieval time
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
"",
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    """Deprecated alias kept for backward compatibility; use PerceiverImageProcessor instead."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
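# Minimal usage sketch (hedged: the checkpoint name below is illustrative, not verified here):
#   extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
# emits the FutureWarning above, then behaves exactly like a PerceiverImageProcessor.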
| 33 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__A : Any = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
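# Typical invocation through the accelerate CLI (the subparser registered above):
#   $ accelerate config --config_file my_config.yaml
# runs the interactive prompts and saves the answers to the given YAML file.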
| 27 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
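# Note: at runtime (the `else:` branch) the module object is swapped for a _LazyModule,
# so the heavy torch/tf/flax submodules above are only imported on first attribute access.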
| 33 | 0 |
"""Constraint utilities for constrained beam search (Constraint, PhrasalConstraint, DisjunctiveConstraint)."""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Run a sanity check on the subclass implementation."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.")
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def does_advance(self, token_id):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def update(self, token_id):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class PhrasalConstraint(Constraint):
    """Constraint enforcing that an exact token sequence appears in the output."""

    def __init__(self, token_ids):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
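# Hand-driving a PhrasalConstraint (token ids below are made up for illustration):
#   c = PhrasalConstraint([5, 9, 2])
#   c.update(5)  # -> (True, False, False)
#   c.update(9)  # -> (True, False, False)
#   c.update(2)  # -> (True, True, False); c.remaining() == 0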
class DisjunctiveTrie:
    """Prefix trie over several candidate token sequences (used by DisjunctiveConstraint)."""

    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                F" {nested_token_ids}.")
        self.trie = root

    def next_tokens(self, current_seq):
        """The tokens that would make progress in the trie, given the current sequence."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True when some sequence is a complete prefix of another (leaf count != word count)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
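# e.g. DisjunctiveTrie([[1, 2, 3], [1, 4]]).next_tokens([1]) -> [2, 4], and
# reached_leaf([1, 4]) -> True (illustrative token ids).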
class DisjunctiveConstraint(Constraint):
    """Constraint fulfilled once *any one* of several candidate token sequences appears."""

    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.")
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
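# DisjunctiveConstraint([[1, 2], [1, 3]]) is satisfied by whichever branch the
# generator completes first; update() walks the shared trie one token at a time.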
class ConstraintListState:
    """Tracks a beam hypothesis' progress through a list of constraints."""

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true.")
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
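# Minimal sketch of how generation code would consult this state (ids illustrative):
#   state = ConstraintListState([PhrasalConstraint([5, 9]), PhrasalConstraint([7])])
#   state.advance()   # -> [5, 7], the tokens that would make progress
#   state.add(5)      # steps the first constraint; get_bank() now rewards the partial fulfillment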
| 28 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
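# e.g. check_same_shape([torch.ones(2, 3), torch.zeros(2, 3)]) -> True (illustrative).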
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True  # hedged: the original attribute name was lost in this excerpt; only the True value survived

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        # norm_num_groups=None, mid_block_type=None, only_cross_attention=False are reconstructed
        # from the upstream diffusers test; the excerpt had lost these values.
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0,
            generator=generator, output_type="np", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy")
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png")
        image = upscaler(
            prompt=prompt, image=low_res_img, num_inference_steps=20, guidance_scale=0,
            generator=generator, output_type="np", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy")
        assert np.abs((expected_image - image).max()) < 5e-2
| 33 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 33 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
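# Hedged usage sketch: the ONNX contract above means an export takes a single 4D
# `pixel_values` tensor (dynamic batch axis) and is validated at atol 1e-4, e.g.:
#   config = Data2VecVisionConfig()
#   onnx_config = Data2VecVisionOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["pixel_values"]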
| 33 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores
        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist()) | 31 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 33 | 0 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
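# Counting idea for solution() below: every semiprime p * q < max_number has both
# prime factors below max_number // 2, so a two-pointer sweep over that prime list
# counts the valid (left, right) pairs without forming each product explicitly.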
def solution(max_number: int = 10**8) -> int:
    """simple docstring"""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 2_01,
            '''sigma_min''': 0.002,
            '''sigma_max''': 80.0,
        }
        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
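    # The expected sums/means in these full-loop tests are regression values
    # recorded with torch.manual_seed(0); the tolerances absorb platform noise.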
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_06, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg='''`timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='''Can only pass one of `num_inference_steps` or `timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 33 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """simple docstring"""

    pass


class Node:
    """simple docstring"""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
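# Note: __iter__ keeps a visited list, so has_loop is O(n^2) in list length;
# Floyd's tortoise-and-hare walk would detect the same loops in O(n) time and
# O(1) extra space.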
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False

    root_node = Node(1)
    print(root_node.has_loop) # False | 34 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
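# sigmoid_linear_unit is the SiLU ("swish") activation, x * sigmoid(x); both
# functions broadcast elementwise over NumPy arrays.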
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a_ :List[str] = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
| 35 |
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
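# Project Euler 29: the set deduplicates values such as 2**4 == 4**2, so its
# final size is the number of distinct terms a**b for 2 <= a, b <= n.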
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : List[Any] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    '''simple docstring'''
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="""dataset""")), """r"""))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
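# expected_shape is the output shape convert_dpt_checkpoint() checks against:
# segmentation logits for the ADE20k head, a predicted depth map otherwise.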
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""", """dpt.encoder""")
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""", """dpt.embeddings""")
    if "patch_embed" in name:
        name = name.replace("""patch_embed""", """patch_embeddings""")
    if "pos_embed" in name:
        name = name.replace("""pos_embed""", """position_embeddings""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""", """projection""")
    if "blocks" in name:
        name = name.replace("""blocks""", """layer""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""", """head""")
    if "scratch" in name:
        name = name.replace("""scratch""", """neck""")
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""", """convs.0""")
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""", """convs.1""")
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""", """convs.2""")
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""", """convs.3""")
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""") : len("""neck.refinenet""") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4)}""")
    if "out_conv" in name:
        name = name.replace("""out_conv""", """projection""")
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""", """residual_layer1""")
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""", """residual_layer2""")
    if "conv1" in name:
        name = name.replace("""conv1""", """convolution1""")
    if "conv2" in name:
        name = name.replace("""conv2""", """convolution2""")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""")
    if "pretrained" in name:
        name = name.replace("""pretrained""", """dpt""")
    if "bn" in name:
        name = name.replace("""bn""", """batch_norm""")
    if "head" in name:
        name = name.replace("""head""", """head.head""")
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""", """layernorm""")
    if "auxlayer" in name:
        name = name.replace("""auxlayer""", """auxiliary_head.head""")
    return name
def read_in_q_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
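# timm stores query/key/value as one fused qkv matrix per layer; the slices
# above split it into the three separate projections the HF DPT layers expect.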
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    '''simple docstring'''
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if """ade""" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="""pt""")
    # forward pass
    outputs = model(**encoding).logits if """ade""" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1E-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing model to hub...""")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 36 |
from copy import deepcopy
class FenwickTree:
    '''simple docstring'''
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('''Either arr or size must be specified''')
    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))
    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)
    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))
    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result
    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)
    def get(self, index: int) -> int:
        return self.query(index, index + 1)
    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
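# Usage sketch (with the names restored above): FenwickTree(arr=[1, 2, 3])
# yields query(0, 3) == 6 and get(1) == 2; add/update/prefix all run in O(log n).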
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 0 |
def equation(x: float) -> float:
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
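# The bracket [a, b] halves every iteration, so roughly log2((b - a) / 0.01)
# steps are needed before the loop exits at the 0.01 width tolerance.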
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 37 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    '''simple docstring'''
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
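# Any mask left as None is synthesized here from the token ids and config sizes,
# so callers only have to supply config, input_ids and decoder_input_ids.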
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFBlenderbotForConditionalGeneration,
            'feature-extraction': TFBlenderbotModel,
            'summarization': TFBlenderbotForConditionalGeneration,
            'text2text-generation': TFBlenderbotForConditionalGeneration,
            'translation': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    src_text = ['My friends are cool but they eat too many carbs.']
    model_name = 'facebook/blenderbot-400M-distill'
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
@slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 38 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
snake_case__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
with self.assertRaisesRegex(
_a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_efficientnet'''] = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_efficientnet'''] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 39 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
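# rename_key moves a tensor in place: pop it under the timm key and reinsert it
# under the Hugging Face key produced by create_rename_keys above.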
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny'''):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('''small'''):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('''small'''):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('''base'''):
            pass
        elif vit_name[4:].startswith('''large'''):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('''huge'''):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
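# The allclose assertions above validate the conversion by comparing the HF
# model's outputs with the original timm network on a single COCO image.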
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__UpperCAmelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class lowerCAmelCase_ ( unittest.TestCase ):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, 'schedulers/'))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/diffusers/schedulers/scheduling_ddpm.py'), os.path.join(self.diffusers_dir, 'schedulers/scheduling_ddpm.py'), )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', re.sub('DDPM', 'Test', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""", F"""{long_class_name}SchedulerOutput""", re.sub('Bert', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', REFERENCE_CODE, overwrite_result=re.sub('DDPM', 'Test', REFERENCE_CODE), )
| 40 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop('''images''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''')
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call).''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
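# --- Illustrative sketch (not part of the processor above): `tokenajson` parses
# XML-like task tokens such as "<s_name>Latte</s_name>" into nested dictionaries,
# e.g. "<s_name>Latte</s_name>" -> {"name": "Latte"}. The standalone helper below
# mirrors only its first step; the helper name is hypothetical.
import re

def _first_field(tokens: str):
    """Return (key, inner content) for the leading <s_key>...</s_key> span, else None."""
    start = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
    if start is None:
        return None
    key = start.group(1)
    body = re.search(rf"<s_{re.escape(key)}>(.*?)</s_{re.escape(key)}>", tokens, re.IGNORECASE)
    return (key, body.group(1).strip()) if body else (key, "")

assert _first_field("<s_name>Latte</s_name><s_price>4.50</s_price>") == ("name", "Latte")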
| 33 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowerCAmelCase__ = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowerCAmelCase__ = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The references
and hypotheses lists need to be the same length, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
lowerCAmelCase__ = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equal to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equal to 2, the metric is referred to as chrF++,
\'beta\' (int): Determines the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ),
} ) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] ,reference_urls=[
'''https://github.com/m-popovic/chrF''',
] ,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : str ,lowercase__ : List[Any] ,lowercase__ : int = CHRF.CHAR_ORDER ,lowercase__ : int = CHRF.WORD_ORDER ,lowercase__ : int = CHRF.BETA ,lowercase__ : bool = False ,lowercase__ : bool = False ,lowercase__ : bool = False ,):
__lowercase = len(references[0] )
if any(len(lowercase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowercase = [[refs[i] for refs in references] for i in range(lowercase__ )]
__lowercase = CHRF(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = sb_chrf.corpus_score(lowercase__ ,lowercase__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
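# --- Usage sketch for the metric above, calling sacrebleu's CHRF directly
# (assumes sacrebleu>=1.4.12 is installed; the sentences are illustrative). Note
# the transposition from "one reference sub-list per prediction" to sacrebleu's
# "one list per reference stream", as described in the module docstring.
from sacrebleu import CHRF

predictions = ["The cat sat on the mat.", "Dogs are friendly."]
references = [["The cat sat on a mat."], ["Dogs are very friendly."]]  # one sub-list per prediction
transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]
result = CHRF(word_order=2).corpus_score(predictions, transposed)
print(result.score)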
| 41 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = num_channels
snake_case__ = embeddings_size
snake_case__ = hidden_sizes
snake_case__ = depths
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_act
snake_case__ = num_labels
snake_case__ = scope
snake_case__ = len(_a )
snake_case__ = out_features
snake_case__ = out_indices
snake_case__ = num_groups
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
snake_case__ = BitModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = BitForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowercase : int = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Optional[Any] = False
__lowercase : str = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = BitModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ = layer_type
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
__lowercase : int = BitConfig
__lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = BitModelTester(self )
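# --- Illustration of the backbone contract verified above: `channels` reports the
# hidden size of each stage selected by `out_features`. Values mirror the tester
# defaults; the stage-name list is an assumption about the backbone's layout.
hidden_sizes = [8, 16, 32, 64]
stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
out_features = ["stage2", "stage3", "stage4"]
channels = [hidden_sizes[stage_names.index(name) - 1] for name in out_features]
assert channels == [16, 32, 64]  # matches config.hidden_sizes[1:] in the test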
| 33 | 0 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'xlm-prophetnet'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 30522 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 128 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = num_encoder_layers
lowerCamelCase_ = num_encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = num_decoder_layers
lowerCamelCase_ = num_decoder_attention_heads
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = init_std # Normal(0, this parameter)
lowerCamelCase_ = activation_function
# parameters for xlmprophetnet
lowerCamelCase_ = ngram
lowerCamelCase_ = num_buckets
lowerCamelCase_ = relative_max_distance
lowerCamelCase_ = disable_ngram_loss
lowerCamelCase_ = eps
# 3 Types of Dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = dropout
lowerCamelCase_ = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
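# --- Quick check of the derived property above (assumes `transformers` is
# installed): `num_hidden_layers` is the sum of encoder and decoder layers and,
# as the setter shows, cannot be assigned directly.
from transformers import XLMProphetNetConfig

config = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
assert config.num_hidden_layers == 24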
| 42 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case__ = '''cuda'''
else:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case__ = AutoModelForCausalLM.from_pretrained(_a )
snake_case__ = model.to(_a )
snake_case__ = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ = model.config.max_length - 1
else:
snake_case__ = model.config.max_length
snake_case__ = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
snake_case__ = encodings['''input_ids''']
snake_case__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ = []
snake_case__ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
snake_case__ = min(start_index + batch_size , len(_a ) )
snake_case__ = encoded_texts[start_index:end_index]
snake_case__ = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
snake_case__ = encoded_batch
with torch.no_grad():
snake_case__ = model(_a , attention_mask=_a ).logits
snake_case__ = out_logits[..., :-1, :].contiguous()
snake_case__ = labels[..., 1:].contiguous()
snake_case__ = attn_mask[..., 1:].contiguous()
snake_case__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 33 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
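# --- Minimal usage sketch for the freeze helper above (assumes torch is
# installed): disabling `requires_grad` excludes parameters from gradient updates.
import torch.nn as nn

layer = nn.Linear(4, 4)
for param in layer.parameters():
    param.requires_grad = False  # the per-parameter operation the helper performs
assert all(not p.requires_grad for p in layer.parameters())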
| 43 |
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase__ : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = Github(os.environ['''GITHUB_TOKEN'''] )
snake_case__ = g.get_repo('''huggingface/diffusers''' )
snake_case__ = repo.get_issues(state='''open''' )
for issue in open_issues:
snake_case__ = sorted(issue.get_comments() , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase )
snake_case__ = comments[0] if len(__lowerCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
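# --- Sketch of the inactivity windows applied above: a stale notice goes out after
# 23 days without updates (on issues at least 30 days old), and closure follows
# 7 days after the bot's notice. The helper name is illustrative.
from datetime import datetime, timedelta

def should_mark_stale(updated_at: datetime, created_at: datetime, now: datetime) -> bool:
    return (now - updated_at).days > 23 and (now - created_at).days >= 30

_now = datetime.utcnow()
assert should_mark_stale(_now - timedelta(days=30), _now - timedelta(days=40), _now)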
if __name__ == "__main__":
main()
| 33 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
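# --- Worked example of the patch arithmetic checked in `create_and_check_model`
# above, using the tester defaults (image_size=32, patch_size=2, embed_dim=16,
# three stages): each stage after the first quarters the number of patches and
# doubles the hidden width.
image_size, patch_size, embed_dim, num_stages = 32, 2, 16, 3
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (num_stages - 1))
expected_dim = embed_dim * 2 ** (num_stages - 1)
assert (expected_seq_len, expected_dim) == (16, 64)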
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
 | 44 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
snake_case__ = _distribute_shards(**__lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = _split_gen_kwargs(__lowerCAmelCase , __lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
if expected is RuntimeError:
with pytest.raises(__lowerCAmelCase ):
_number_of_shards_in_gen_kwargs(__lowerCAmelCase )
else:
snake_case__ = _number_of_shards_in_gen_kwargs(__lowerCAmelCase )
assert out == expected
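# --- Hypothetical reimplementation of the contract exercised above (not the
# `datasets` internal): spread `num_shards` shard indices over at most
# `max_num_jobs` contiguous ranges, as evenly as possible.
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    jobs = min(num_shards, max_num_jobs)
    out, start = [], 0
    for job in range(jobs):
        size = num_shards // jobs + (1 if job < num_shards % jobs else 0)
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards_sketch(0, 1) == []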
| 33 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def A ( lowercase__ : int ) -> Optional[int]:
UpperCamelCase__ :Dict = FileLock(str(tmpdir / """foo.lock""" ) )
UpperCamelCase__ :str = FileLock(str(tmpdir / """foo.lock""" ) )
UpperCamelCase__ :List[str] = 0.01
with locka.acquire():
with pytest.raises(lowercase__ ):
UpperCamelCase__ :Optional[Any] = time.time()
locka.acquire(lowercase__ )
assert time.time() - _start > timeout
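# --- Standalone sketch of the timeout behaviour tested above (assumes `datasets`
# is installed; the helper name and path are illustrative).
from datasets.utils.filelock import FileLock, Timeout

def lock_is_free(path: str) -> bool:
    """Return True if `path` can be locked immediately, False if it is already held."""
    probe = FileLock(path)
    try:
        probe.acquire(timeout=0.01)
    except Timeout:
        return False
    probe.release()
    return True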
def A ( lowercase__ : str ) -> int:
UpperCamelCase__ :str = """a""" * 1000 + """.lock"""
UpperCamelCase__ :str = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(lowercase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
UpperCamelCase__ :List[Any] = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowercase__ ):
locka.acquire(0 )
 | 45 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
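# --- Sketch of the device-aware seeding pattern used in `get_dummy_inputs` above:
# MPS has no per-device generators, so a seeded CPU generator is used instead
# (torch.manual_seed returns the default CPU generator).
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)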
| 33 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'The column name of the images in the files.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'A folder containing the training data.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'A folder containing the validation data.'} )
lowerCAmelCase__ = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = {}
if self.train_dir is not None:
_lowerCamelCase : Optional[Any] = self.train_dir
if self.validation_dir is not None:
_lowerCamelCase : str = self.validation_dir
_lowerCamelCase : Any = data_files if data_files else None
@dataclass
class A_ :
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCAmelCase__ = field(
default=_a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase__ = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class A_ ( _a ):
lowerCAmelCase__ = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def collate_fn( examples ) -> Union[str, Any]:
    '''simple docstring'''
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    return {"pixel_values": pixel_values}
def main( ) -> Any:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae" , model_args , data_args )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F""" distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split )
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch." )
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"""New config: {config}""" )
    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch" )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img : img.convert("RGB" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(size , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    def preprocess_images(examples ):
        examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ) -> str:
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main() | 46 |
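# Sketch (not part of the script above): the base_learning_rate scaling used
# there follows the MAE recipe, absolute_lr = base_lr * total_batch_size / 256.
# The numbers below are hypothetical, purely for illustration.
base_learning_rate = 1e-3
train_batch_size, gradient_accumulation_steps, world_size = 64, 2, 4
total_train_batch_size = train_batch_size * gradient_accumulation_steps * world_size
absolute_lr = base_learning_rate * total_train_batch_size / 256
assert abs(absolute_lr - 2e-3) < 1e-12  # 512 / 256 doubles the base rate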
import math
class SelfOrganizingMap :
    '''simple docstring'''
    def get_winner ( self:Optional[int] , weights:list[list[float]] , sample:list[int] ):
        # squared Euclidean distance from the sample to each of the two weight vectors
        distance_a = 0.0
        distance_b = 0.0
        for i in range(len(sample ) ):
            distance_a += math.pow((sample[i] - weights[0][i]) , 2 )
            distance_b += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if distance_a > distance_b else 1
    def update ( self:Tuple , weights:list[list[int | float]] , sample:list[int] , j:int , alpha:float ):
        # move the winning weight vector toward the sample
        for i in range(len(weights ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main ( ) -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""" )
    print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 33 | 0 |
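# Sketch: one winner-take-all update step from the rule above,
# w += alpha * (x - w), with alpha = 0.5. Starting from weight 0.2 and sample
# component 1, the winning weight moves halfway toward the sample.
w = 0.2
w += 0.5 * (1 - w)
assert abs(w - 0.6) < 1e-12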
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _UpperCamelCase( unittest.TestCase ):
    def test_accelerated_optimizer_pickling ( self : Union[str, Any] ):
        '''simple docstring'''
        model = torch.nn.Linear(1_0 , 1_0 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 47 |
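# Sketch (assumes torch is installed): the regression guarded by the test
# above is that Accelerate's optimizer wrapper stays picklable; a plain torch
# optimizer round-trips through pickle the same way.
import pickle
import torch
opt = torch.optim.SGD(torch.nn.Linear(2, 2).parameters(), lr=0.1)
restored = pickle.loads(pickle.dumps(opt))
assert isinstance(restored, torch.optim.SGD)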
from __future__ import annotations
from statistics import mean
def calculate_waitingtime( arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime( burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33 | 0 |
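# Sketch: hand-checked values for the test case above (assumes the two
# functions from that example are in scope). With equal arrival times the
# non-preemptive shortest-job-first order is burst 2, 3, 5, 7, giving waits
# 0, 5, 2, 10 and turnarounds that add each burst back.
assert calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4) == [0, 5, 2, 10]
assert calculate_turnaroundtime([2, 5, 3, 7], 4, [0, 5, 2, 10]) == [2, 10, 5, 17]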
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted :
    def __init__( self : Optional[int] ):
        """simple docstring"""
        self.connections = {}
    def add_node ( self : Any , node : str ):
        """simple docstring"""
        self.connections[node] = {}
    def add_transition_probability ( self : Dict , node1 : str , node2 : str , probability : float ):
        """simple docstring"""
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes ( self : Optional[Any] ):
        """simple docstring"""
        return list(self.connections )
    def transition ( self : Union[str, Any] , node : str ):
        """simple docstring"""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions ( start : str , transitions : list[tuple[str, str, float]] , steps : int ) -> dict[str, int]:
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
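# Sketch: a two-node chain using the helper above (assumes the class and
# get_transitions from that example are in scope); outgoing probabilities
# from each node sum to 1 and the counter records how often each node is hit.
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
visits = get_transitions("a", transitions, 1000)
assert set(visits) == {"a", "b"}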
lowerCamelCase__ : List[str] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation ) -> int:
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 | 0 |
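# Sketch: the evaluator above requires fully parenthesised input; RULE 4 only
# fires on ')' so operator precedence is never consulted. Assumes the function
# from that example is in scope; operands must be single digits.
assert dijkstras_two_stack_algorithm("(3 + (2 * 4))") == 11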
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort( collection :list , n :int ):
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next( collection :list , index :int ):
    # Checks order between adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 49 |
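# Sketch: the recursion sorts in place in O(n^2); each rec_insertion_sort call
# lets insert_next sink element n - 1 into the already sorted prefix (assumes
# the functions from the example above are in scope).
data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
assert data == [1, 2, 3, 4, 5]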
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class PerceiverFeatureExtractor (PerceiverImageProcessor ):
    '''simple docstring'''
    def __init__( self:List[Any] , *args:Dict , **kwargs:Tuple ):
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 33 | 0 |
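# Sketch (assumes transformers is installed): the deprecated alias above emits
# a FutureWarning on construction, which can be verified with the warnings
# module.
import warnings
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    PerceiverFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)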
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    @property
    def dummy_uncond_unet ( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
        return model
    def test_inference ( self ):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet ,scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator ,num_inference_steps=20 ,output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator ,num_inference_steps=20 ,output_type="""numpy""" ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def test_pndm_cifar10 ( self ):
        model_id = """google/ddpm-cifar10-32"""
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet ,scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator ,output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 50 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
    """tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_roberta_fast"""] = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta"""] = [
        """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RobertaForCausalLM""",
        """RobertaForMaskedLM""",
        """RobertaForMultipleChoice""",
        """RobertaForQuestionAnswering""",
        """RobertaForSequenceClassification""",
        """RobertaForTokenClassification""",
        """RobertaModel""",
        """RobertaPreTrainedModel""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta"""] = [
        """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRobertaForCausalLM""",
        """TFRobertaForMaskedLM""",
        """TFRobertaForMultipleChoice""",
        """TFRobertaForQuestionAnswering""",
        """TFRobertaForSequenceClassification""",
        """TFRobertaForTokenClassification""",
        """TFRobertaMainLayer""",
        """TFRobertaModel""",
        """TFRobertaPreTrainedModel""",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta"""] = [
        """FlaxRobertaForCausalLM""",
        """FlaxRobertaForMaskedLM""",
        """FlaxRobertaForMultipleChoice""",
        """FlaxRobertaForQuestionAnswering""",
        """FlaxRobertaForSequenceClassification""",
        """FlaxRobertaForTokenClassification""",
        """FlaxRobertaModel""",
        """FlaxRobertaPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 | 0 |
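# Sketch: a minimal stand-in (hypothetical, not the transformers class) for
# the _LazyModule pattern above; attribute access triggers the real import.
import importlib
import types
class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute -> absolute module path that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)
lazy = MiniLazyModule("demo", {"json": ["dumps", "loads"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'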
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(F"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 51 |
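# Sketch: the og:image lookup works on any HTML string, so the parsing step
# can be demonstrated without network access (assumes beautifulsoup4 is
# installed).
from bs4 import BeautifulSoup
html = '<html><head><meta property="og:image" content="https://example.com/x.jpg"></head></html>'
tag = BeautifulSoup(html, 'html.parser').find('meta', {'property': 'og:image'})
assert tag['content'] == 'https://example.com/x.jpg'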
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ) -> List[Any]:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class StableDiffusionLatentUpscalePipelineFastTests (PipelineLatentTesterMixin ,PipelineKarrasSchedulerTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
__lowercase : Any = True
    @property
    def dummy_image ( self:List[str] ):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components ( self:Union[str, Any] ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
                '''KDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
            ) , in_channels=8 , mid_block_type=None , only_cross_attention=False , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
            ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type='''sample''' )
        text_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs ( self:List[Any] , device:Optional[Any] , seed:int=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': self.dummy_image.cpu(),
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference ( self:str ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_attention_slicing_forward_pass ( self:Union[str, Any] ):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def test_cpu_offload_forward_pass ( self:List[Any] ):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def test_dict_tuple_outputs_equivalent ( self:str ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical ( self:Any ):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def test_pt_np_pil_outputs_equivalent ( self:Tuple ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def test_save_load_local ( self:Dict ):
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components ( self:str ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_karras_schedulers_shape ( self:Any ):
        skip_schedulers = [
            '''DDIMScheduler''',
            '''DDPMScheduler''',
            '''PNDMScheduler''',
            '''HeunDiscreteScheduler''',
            '''EulerAncestralDiscreteScheduler''',
            '''KDPM2DiscreteScheduler''',
            '''KDPM2AncestralDiscreteScheduler''',
            '''DPMSolverSDEScheduler''',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
    def tearDown ( self:Optional[Any] ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16 ( self:str ):
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.float16 )
        pipe.to('''cuda''' )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.float16 )
        upscaler.to('''cuda''' )
        prompt = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        low_res_latents = pipe(prompt , generator=generator , output_type='''latent''' ).images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='''np''' , ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
        assert np.abs((expected_image - image).mean() ) < 5e-2
    def test_latent_upscaler_fp16_image ( self:Optional[int] ):
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.float16 )
        upscaler.to('''cuda''' )
        prompt = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
        upscaled_image = upscaler(
            prompt=prompt , image=image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='''np''' , ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
        assert np.abs((expected_image - upscaled_image).max() ) < 5e-2
| 33 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp ( self ):
        super().setUp()
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts ( self , tokenizer ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer ( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_chinese ( self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def test_basic_tokenizer_lower ( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false ( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true ( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default ( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_no_lower ( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false ( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true ( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens ( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer ( self ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
    @require_torch
    def test_prepare_batch ( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text , padding=True , return_tensors='''pt''' )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace ( self ):
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control ( self ):
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation ( self ):
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )
    @slow
    def test_sequence_builders ( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102] | 52 |
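# Sketch: the WordpieceTokenizer behaviour exercised above is greedy
# longest-match-first; a minimal reimplementation over a toy vocab shows why
# "unwanted" splits into un + ##want + ##ed.
def wordpiece(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = ("##" if start > 0 else "") + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return ["[UNK]"]
        pieces.append(cur)
        start = end
    return pieces
assert wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]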
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
    def setUp ( self:str ):
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor ( self:Dict , **kwargs:List[Any] ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor ( self:Optional[Any] , **kwargs:Tuple ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown ( self:Dict ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default ( self:List[str] ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor ( self:Dict ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_20_00] )
        audio_dict = feature_extractor(audio , return_tensors='''np''' )
        input_processor = processor(audio=audio , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor ( self:List[Any] ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 2_24, 2_24] )
        image_dict = image_processor(images , return_tensors='''np''' )
        input_processor = processor(images=images , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor ( self:Any ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_20_00] )
        images = np.ones([3, 2_24, 2_24] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names ( self:List[Any] ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : str = logging.get_logger(__name__)
_snake_case : Any = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config ( PretrainedConfig ):
    """simple docstring"""
    model_type = """speech_to_text_2"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self : Tuple , vocab_size : int=1_0_0_0_0 , decoder_layers : int=6 , decoder_ffn_dim : int=2_0_4_8 , decoder_attention_heads : int=4 , decoder_layerdrop : float=0.0 , use_cache : bool=True , activation_function : str="relu" , d_model : int=2_5_6 , dropout : float=0.1 , attention_dropout : float=0.0 , activation_dropout : float=0.0 , init_std : float=0.02 , decoder_start_token_id : int=2 , scale_embedding : bool=True , pad_token_id : int=1 , bos_token_id : int=0 , eos_token_id : int=2 , max_target_positions : int=1_0_2_4 , **kwargs : Optional[Any] , ) -> Optional[Any]:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 53 |
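# Sketch (assumes transformers is installed): attribute_map above aliases the
# generic config names onto the decoder-specific ones.
config = Speech2Text2Config()
assert config.num_attention_heads == config.decoder_attention_heads
assert config.hidden_size == config.d_model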
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig (PretrainedConfig ):
    '''simple docstring'''
    model_type = 'data2vec-vision'
    def __init__( self:int , hidden_size:int=7_68 , num_hidden_layers:int=12 , num_attention_heads:int=12 , intermediate_size:int=30_72 , hidden_act:str="gelu" , hidden_dropout_prob:float=0.0 , attention_probs_dropout_prob:float=0.0 , initializer_range:float=0.02 , layer_norm_eps:float=1e-12 , image_size:int=2_24 , patch_size:int=16 , num_channels:int=3 , use_mask_token:bool=False , use_absolute_position_embeddings:bool=False , use_relative_position_bias:bool=False , use_shared_relative_position_bias:bool=False , layer_scale_init_value:float=0.1 , drop_path_rate:float=0.1 , use_mean_pooling:bool=True , out_indices:list=[3, 5, 7, 11] , pool_scales:list=[1, 2, 3, 6] , use_auxiliary_head:bool=True , auxiliary_loss_weight:float=0.4 , auxiliary_channels:int=2_56 , auxiliary_num_convs:int=1 , auxiliary_concat_input:bool=False , semantic_loss_ignore_index:int=2_55 , **kwargs:Dict , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig (OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs ( self:List[str] ):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation ( self:Tuple ):
        return 1e-4
| 33 | 0 |
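# Sketch (assumes transformers is installed): the defaults above give a
# ViT-Base style encoder, and the ONNX config declares a single NCHW
# pixel_values input with a relaxed validation tolerance.
config = Data2VecVisionConfig()
assert config.hidden_size == 768 and config.num_hidden_layers == 12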
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A ( unittest.TestCase ):
    def analyze_directory ( self: str , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ) -> Dict:
        '''simple docstring'''
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing" , file )
            if only_modules:
                module_identifier = file.split("." )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'{module_identifier} is not a module.' )
            else:
                result = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_files ( self: Optional[int] ) -> Tuple:
        '''simple docstring'''
        directory = Path("src/transformers" )
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def test_tokenization_files ( self: Tuple ) -> Optional[int]:
        '''simple docstring'''
        directory = Path("src/transformers" )
        identifier = "tokenization"
        self.analyze_directory(directory , identifier=identifier )
    def test_configuration_files ( self: List[Any] ) -> Tuple:
        '''simple docstring'''
        directory = Path("src/transformers" )
        identifier = "configuration"
        self.analyze_directory(directory , identifier=identifier )
    def test_remaining_files ( self: List[str] ) -> Union[str, Any]:
        '''simple docstring'''
        directory = Path("src/transformers" )
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def test_documentation ( self: List[Any] ) -> List[Any]:
        '''simple docstring'''
        directory = Path("docs/source" )
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 54 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ) -> Any:
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ) -> List[str]:
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ) -> Tuple:
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ) -> Union[str, Any]:
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ) -> List[Any]:
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ) -> List[str]:
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ) -> Union[str, Any]:
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 33 | 0 |
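# Sketch: with these entry points in a repository's hubconf.py, models load
# through torch.hub (hypothetical call shown):
#   import torch
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")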
from manim import *
class UpperCAmelCase ( Scene ):
    '''simple docstring'''
    def construct ( self : str ):
__A = Rectangle(height=0.5 ,width=0.5 )
__A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
__A = [mem.copy() for i in range(6 )]
__A = [mem.copy() for i in range(6 )]
__A = VGroup(*A ).arrange(A ,buff=0 )
__A = VGroup(*A ).arrange(A ,buff=0 )
__A = VGroup(A ,A ).arrange(A ,buff=0 )
__A = Text("CPU" ,font_size=24 )
__A = Group(A ,A ).arrange(A ,buff=0.5 ,aligned_edge=A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A )
__A = [mem.copy() for i in range(1 )]
__A = VGroup(*A ).arrange(A ,buff=0 )
__A = Text("GPU" ,font_size=24 )
__A = Group(A ,A ).arrange(A ,buff=0.5 ,aligned_edge=A )
gpu.align_to(A ,A )
gpu.set_x(gpu.get_x() - 1 )
self.add(A )
__A = [mem.copy() for i in range(6 )]
__A = VGroup(*A ).arrange(A ,buff=0 )
__A = Text("Model" ,font_size=24 )
__A = Group(A ,A ).arrange(A ,buff=0.5 ,aligned_edge=A )
model.move_to([3, -1.0, 0] )
self.play(
Create(A ,run_time=1 ) ,Create(A ,run_time=1 ) ,Create(A ,run_time=1 ) ,)
__A = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
__A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__A = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(A ,run_time=2.5 ) ,Write(A ) ,Write(A ) )
self.add(A )
__A = []
__A = []
__A = []
for i, rect in enumerate(A ):
__A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(A ,opacity=0.7 )
cpu_target.move_to(A )
cpu_target.generate_target()
__A = 0.46 / 4
__A = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=A )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=A ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=A ,buff=0.0 )
cpu_targs.append(A )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(A ) )
second_animations.append(MoveToTarget(A ,run_time=1.5 ) )
self.play(*A )
self.play(*A )
self.wait()
| 55 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = (CMStochasticIterativeScheduler,)
__lowercase : List[str] = 10
def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
snake_case__ = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 10
snake_case__ = self.get_scheduler_config()
snake_case__ = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps[0]
snake_case__ = scheduler.timesteps[1]
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
    def test_full_loop_no_noise_onestep( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3
    def test_full_loop_no_noise_multistep( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_06, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg='''`timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 33 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_a : Optional[Any] = [
"good first issue",
"feature request",
"wip",
]
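# Close issues that have sat for a week after a bot reminder, and leave a
# stale-warning comment on issues inactive for more than 23 days; issues
# carrying an exempt label are skipped in both cases.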
def main():
    """simple docstring"""
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda lowercase__ : lowercase__.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 56 |
import numpy as np
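# Element-wise logistic sigmoid and the SiLU/swish activation,
# swish(x) = x * sigmoid(x). `sigmoid_linear_unit` is an assumed name for
# the second, previously anonymised function; `sigmoid` is pinned by its
# call site below.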
def sigmoid( vector: np.ndarray ) -> np.ndarray:
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit( vector: np.ndarray ) -> np.ndarray:
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
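# The fast tests below run the Kandinsky 2.2 controlnet img2img pipeline on
# tiny dummy modules; the slow class at the bottom exercises the real
# checkpoints on GPU against a reference image.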
class KandinskyVaaControlnetImgaImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class =KandinskyVaaControlnetImgaImgPipeline
    params =['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    batch_params =['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    required_optional_params =[
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention =False
@property
    def text_embedder_hidden_size( self ):
return 3_2
@property
    def time_input_dim( self ):
return 3_2
@property
    def block_out_channels_a( self ):
return self.time_input_dim
@property
    def time_embed_dim( self ):
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
return 1_0_0
@property
    def dummy_unet( self ):
torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 8,
            # out_channels is double in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_schedule': 'linear',
            'beta_start': 0.0_0_0_8_5,
            'beta_end': 0.0_1_2,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
        # create hint
        hint = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
    def test_kandinsky_controlnet_img2img( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        init_image = init_image.resize((5_1_2, 5_1_2) )
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        hint = torch.from_numpy(np.array(hint ) ).float() / 2_5_5.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        prompt = 'A robot, 4k photo'
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , image=init_image , strength=0.8_5 , generator=generator , negative_prompt='' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type='np' , )
        image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(image , expected_image ) | 57 |
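# Project Euler 29: count the distinct values of a**b for 2 <= a, b <= n.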
def solution( n: int = 100 ) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
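# Mixin that round-trips a feature extractor through to_json_string,
# to_json_file and save_pretrained/from_pretrained, checking that the
# config dict survives each trip.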
class FeatureExtractionSavingTestMixin :
    """simple docstring"""
    test_cast_dtype = None
    def test_feat_extract_to_json_string( self ) -> None:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
    def test_feat_extract_to_json_file( self ) -> None:
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_feat_extract_from_and_save_pretrained( self ) -> None:
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_init_without_params( self ) -> None:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
| 58 |
from copy import deepcopy
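# Fenwick tree (binary indexed tree): point updates and prefix-sum queries
# in O(log n), navigating the implicit tree via each index's lowest set bit.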
class FenwickTree :
'''simple docstring'''
    def __init__( self , arr:list[int] | None = None , size:int | None = None ):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('''Either arr or size must be specified''' )
    def init( self , arr:list[int] ):
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array( self ):
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
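    # next_ and prev step by the lowest set bit (index & -index): next_ climbs
    # to the next responsible node during updates, prev strips the bit while
    # accumulating prefix sums.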
    @staticmethod
    def next_( index:int ):
        return index + (index & (-index))
    @staticmethod
    def prev( index:int ):
        return index - (index & (-index))
    def add( self , index:int , value:int ):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update( self , index:int , value:int ):
        self.add(index , value - self.get(index ) )
    def prefix( self , right:int ):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1 # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query( self , left:int , right:int ):
        return self.prefix(right ) - self.prefix(left )
    def get( self , index:int ):
        return self.query(index , index + 1 )
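    # rank_query walks down the implicit tree one power of two at a time,
    # returning the largest index whose prefix sum does not exceed `value`
    # (assumes non-negative tree values).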
    def rank_query( self , value:int ):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1 # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 0 |
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
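# tokenajson (token2json in the un-mangled transformers API) should rebuild
# the nested dict from Donut's XML-like tag sequence.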
class DonutProcessorTest ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) ->None:
        '''simple docstring'''
        self.processor =DonutProcessor.from_pretrained(__A)
    def test_tokenajson(self) ->None:
        '''simple docstring'''
        expected_json ={
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
        sequence =(
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
        actual_json =self.processor.tokenajson(sequence)
        self.assertDictEqual(actual_json , expected_json)
| 59 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester :
'''simple docstring'''
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past , output_from_past , rtol=1e-3 )
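# Helper that assembles the model input dict, filling in default attention
# and head masks whenever the caller does not provide them.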
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest (TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests (unittest.TestCase ):
'''simple docstring'''
    src_text = ['My friends are cool but they eat too many carbs.']
    model_name = 'facebook/blenderbot-400M-distill'
@cached_property
    def tokenizer( self ):
        return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
@slow
    def test_generation_from_long_input( self ):
        model_inputs = self.tokenizer(self.src_text , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 | 0 |
import math
import sys
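# LZW decompression driver: read the compressed file as a bit string, strip
# the size prefix, rebuild the data by growing a code lexicon, and write the
# result back out as bytes.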
def read_file_binary( file_path: str ) -> str:
    """simple docstring"""
    result = ''''''
    try:
        with open(file_path , '''rb''' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def decompress_data( data_bits: str ) -> str:
    """simple docstring"""
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result, curr_string = '''''', ''''''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '''0'''
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['''0''' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '''1'''
        index += 1
        curr_string = ''''''
    return result
def write_file_binary( file_path: str , to_write: str ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , '''wb''' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('''10000000''' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='''big''' ) )
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def remove_prefix( data_bits: str ) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path: str , destination_path: str ) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 60 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
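# Exercises AutoImageProcessor resolution: model shortcut, local
# preprocessor/feature-extractor/config files, error cases, and
# dynamic/custom image processor registration.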
class AutoImageProcessorTest (unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut( self ):
snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
    def test_image_processor_from_local_directory_from_key( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
    def test_image_processor_from_local_directory_from_feature_extractor_key( self ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
    def test_image_processor_from_local_directory_from_config( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
snake_case__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
    def test_image_processor_from_local_directory_preprocessor_config_only( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
    def test_repo_not_found( self ):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )
    def test_revision_not_found( self ):
with self.assertRaisesRegex(
_a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
    def test_image_processor_not_found( self ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_from_pretrained_dynamic_image_processor( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def test_new_image_processor_registration( self ):
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
# Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir )
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_image_processor , CustomImageProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict( self ):
        class NewImageProcessor (CLIPImageProcessor ):
'''simple docstring'''
            is_local = True
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
# If remote code is not set, the default is to use local
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 0 |