| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
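
# Shape sanity note (hedged addition, not in the original test file): with the
# tester defaults above (image_size=32, hidden_sizes=[10, 20, 30, 40]), the
# network's overall stride of 32 yields a last_hidden_state of shape
# (batch_size, 40, 1, 1), which is exactly what create_and_check_model asserts.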
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
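
# Usage sketch (hedged addition, assuming an installed `transformers` build
# that ships MT5): thanks to the _LazyModule registration above, importing the
# package is cheap and each submodule is only loaded on first attribute access.
#
#     from transformers import MT5Config
#     config = MT5Config()        # mT5 defaults
#     print(config.model_type)    # "mt5"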
def one_pence() -> int:
    """There is exactly one way to give one pence: a single 1p coin."""
    return 1


def two_pence(x: int) -> int:
    """Count the combinations for x pence using coins of at most 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Count the combinations for x pence using coins of at most 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Count the combinations for x pence using coins of at most 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Count the combinations for x pence using coins of at most 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Count the combinations for x pence using coins of at most 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Count the combinations for x pence using coins of at most 100p."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Count the combinations for x pence using coins of at most 200p."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(pence: int = 200) -> int:
    """Return the number of ways `pence` can be made from any number of
    standard UK coins (1p, 2p, 5p, 10p, 20p, 50p, 100p and 200p)."""
    return two_pound(pence)
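
# Quick self-check (hedged addition): 73682 is the published answer to Project
# Euler problem 31, the number of ways to make 200p from standard UK coins.
assert solution(200) == 73682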
if __name__ == "__main__":
    print(solution(int(input().strip())))
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
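
# Note (hedged addition): the size=(42, 84) case in
# test_image_processor_from_dict_with_kwargs exists because older Donut
# configs stored `size` as a (width, height) tuple; from_dict normalizes such
# tuples into the {"height": ..., "width": ...} dict, hence the swapped order.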
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
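
# Usage sketch (hedged: assumes a transformers build that still ships the
# deprecated VAN model under this path; the defaults above describe van-base):
#
#     config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
#     assert config.model_type == "van"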
def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations, built recursively by rotating the list.

    >>> permute([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations, built by in-place swapping (backtracking).

    >>> permute2([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output
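
# Cross-check against the standard library (hedged illustration, not part of
# the original module): both implementations must yield the same *set* of
# permutations as itertools.permutations, merely in a different order.
from itertools import permutations as _it_permutations

assert sorted(permute([1, 2, 3])) == sorted(list(p) for p in _it_permutations([1, 2, 3]))
assert sorted(permute2([1, 2, 3])) == sorted(list(p) for p in _it_permutations([1, 2, 3]))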
if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
def optimal_merge_pattern(files: list) -> float:
    """Merge the files in groups of two, always picking the two cheapest,
    and return the minimum total cost (the optimal merge pattern).

    >>> optimal_merge_pattern([2, 3, 4])
    14
    >>> optimal_merge_pattern([5, 10, 20, 30, 30])
    205
    >>> optimal_merge_pattern([8, 8, 8, 8, 8])
    96
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
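
# Equivalent heap-based sketch (hedged addition, not in the original file):
# heapq removes the repeated linear min()/index() scans, improving the loop
# from roughly O(n^2) to O(n log n).
import heapq


def optimal_merge_pattern_heap(files: list) -> int:
    """
    >>> optimal_merge_pattern_heap([2, 3, 4])
    14
    """
    heapq.heapify(files)
    total = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        total += merged
        heapq.heappush(files, merged)
    return total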
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step / max_step) after which the complex
    number made up of this x-y pair diverges. Members of the Mandelbrot set
    do not diverge, so their distance is 1.

    >>> get_distance(0, 0, 50)
    1.0
    >>> get_distance(2, 1, 50)
    0.0
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black-and-white color coding that ignores the relative distance: the
    Mandelbrot set is black, everything else is white.

    >>> get_black_and_white_rgb(0)
    (255, 255, 255)
    >>> get_black_and_white_rgb(1)
    (0, 0, 0)
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color coding that takes the relative distance into account; the
    Mandelbrot set itself is black.

    >>> get_color_coded_rgb(0)
    (255, 0, 0)
    >>> get_color_coded_rgb(1)
    (0, 0, 0)
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Generate an image of the Mandelbrot set."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
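
# Note (hedged addition): the loop in get_distance bails out once
# a*a + b*b > 4, i.e. |z| > 2. Radius 2 is the classical escape bound: any
# orbit that leaves it is guaranteed to diverge, so a larger radius would only
# smooth the colour gradient, never change set membership.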
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4,
    #                 figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __lowercase :
snake_case_ = PegasusConfig
snake_case_ = {}
snake_case_ = """gelu"""
def __init__( self : List[Any] ,A : int ,A : Optional[Any]=13 ,A : Dict=7 ,A : Dict=True ,A : Any=False ,A : Dict=99 ,A : int=32 ,A : Optional[int]=5 ,A : Union[str, Any]=4 ,A : Union[str, Any]=37 ,A : str=0.1 ,A : int=0.1 ,A : Optional[int]=20 ,A : Tuple=2 ,A : str=1 ,A : Optional[Any]=0 ,):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : List[Any] = seq_length
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = eos_token_id
UpperCAmelCase__ : Union[str, Any] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size )
UpperCAmelCase__ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase__ : Any = np.concatenate([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ : str = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase__ : Optional[Any] = prepare_pegasus_inputs_dict(A ,A ,A )
return config, inputs_dict
def __lowercase ( self : Any ,A : Optional[int] ,A : str ,A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : Dict = model_class_name(A )
UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
UpperCAmelCase__ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase__ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, -1:] ,A ,decoder_attention_mask=A ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=A ,)
UpperCAmelCase__ : Dict = model.decode(A ,A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def __lowercase ( self : Optional[int] ,A : str ,A : Dict ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : str = model_class_name(A )
UpperCAmelCase__ : Any = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase__ : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
UpperCAmelCase__ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase__ : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase__ : Dict = model.decode(
decoder_input_ids[:, -1:] ,A ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : Union[str, Any] = model.decode(A ,A ,decoder_attention_mask=A )
UpperCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.not_equal(__UpperCamelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : Tuple = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = FlaxPegasusModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=A )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A ,A ,A )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A ,A ,A )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A )
UpperCAmelCase__ : int = model_class(A )
@jax.jit
def encode_jitted(A : Optional[int] ,A : Union[str, Any]=None ,**A : Optional[Any] ):
return model.encode(input_ids=A ,attention_mask=A )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ : int = encode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ : Dict = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = model_class(A )
UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
UpperCAmelCase__ : Dict = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(A : List[Any] ,A : Any ,A : List[Any] ):
return model.decode(
decoder_input_ids=A ,decoder_attention_mask=A ,encoder_outputs=A ,)
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ : Tuple = decode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ : str = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=A )
UpperCAmelCase__ : Any = np.ones((1, 1) )
UpperCAmelCase__ : Optional[Any] = model(A )
self.assertIsNotNone(A )
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase__ : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase__ : Union[str, Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
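
# Note (hedged addition): the JIT tests above compare only output *shapes*
# between jitted and non-jitted runs; numerical agreement is exercised
# separately by the cache-consistency checks, which require diff < 1e-3.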
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    """
    >>> greatest_common_divisor(4, 8)
    4
    >>> greatest_common_divisor(8, 4)
    4
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """encrypt_key is an NxN numpy array"""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """
        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.replace_letters('T')
        19
        """
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """
        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.replace_digits(19)
        'T'
        """
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Keep only key_string characters and pad with the last character
        until the length is a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """
        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.encrypt('hello')
        '85FF00'
        """
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """
        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
        >>> hill_cipher.decrypt('85FF00')
        'HELLOO'
        """
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
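
# Why make_decrypt_key works (hedged explanatory note, added): for a key K
# with determinant d, inv(K) = adj(K) / d over the rationals, so
# det(K) * inv(K) recovers the integer adjugate. Multiplying by det_inv, the
# inverse of d modulo 36 found by brute force above, gives adj(K) * d^-1
# (mod 36), which is exactly the modular inverse matrix decryption needs.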
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    main()
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes.

    >>> bfs_shortest_path(demo_graph, "G", "D")
    ['G', 'C', 'A', 'B', 'D']
    """
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and
    `target` nodes; returns -1 when either node is missing from the graph.

    >>> bfs_shortest_path_distance(demo_graph, "G", "D")
    4
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
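
# Performance note (hedged addition): `queue.pop(0)` on a Python list is
# O(n); for large graphs prefer collections.deque, which pops from the left
# in O(1), e.g.:
#
#     from collections import deque
#     queue = deque([[start]])
#     path = queue.popleft()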
if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
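
# Note (hedged addition): `stretch` is essentially histogram equalization.
# The running sum `sk` accumulates the empirical CDF of grey levels, and each
# level is remapped to round((L - 1) * CDF(level)), which spreads the
# intensity values across the full 0..255 range.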
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(
    lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given bit string with the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (Elias gamma coded) to the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string (only 0's and 1's) as bytes into the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            # pad the last byte (or append a fresh one) with a 1 followed by 0's
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
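
# Quick usage sketch (added for illustration; `_demo` is not part of the original
# module): run the in-memory compression stage on a short bit string and report sizes.
def _demo() -> None:
    bits = "0" * 32 + "1" * 32
    packed = compress_data(bits)
    print(f"{len(bits)} input bits -> {len(packed)} output bits")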
| 68 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = 'unispeech-sat'
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
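
# Worked example (illustrative, not from the original file): with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2), `inputs_to_logits_ratio` is the product
# of the strides, i.e. 5 * 2**6 = 320 input samples per output frame.
def _demo_stride_product() -> None:
    strides = (5, 2, 2, 2, 2, 2, 2)
    assert functools.reduce(operator.mul, strides, 1) == 320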
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        message = f'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
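
# Optional cross-check (illustrative addition): verify the sieve against plain
# trial division for a small limit.
def _check_sieve(limit: int = 100) -> None:
    def is_prime(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, int(math.sqrt(n)) + 1))
    assert prime_sieve(limit) == [n for n in range(2, limit + 1) if is_prime(n)]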
| 69 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
def __init__( self , A_ , A_=100 , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=4 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=[0, 1, 2, 3] , )-> Any:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = 100
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = scope
UpperCamelCase = out_indices
UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 1
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = BeitModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.num_labels
UpperCamelCase = BeitForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A_ ), BeitForMaskedImageModeling]:
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase = False
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(A_ )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=A_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BeitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).pixel_values.to(A_ )
# prepare bool_masked_pos
UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(pixel_values=A_ , bool_masked_pos=A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(A_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1e-2 ) )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 21841) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , A_ )
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse('9.0.0')
        if is_pillow_less_than_9:
UpperCamelCase = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=A_ , )
else:
UpperCamelCase = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits.detach().cpu()
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(500, 300)] )
UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , A_ )
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ )
UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , A_ )
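
# Illustrative note (not in the original tests): with the tester defaults above,
# image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches, so the
# expected sequence length including the [CLS] token is 226.
def _demo_seq_length(image_size: int = 30, patch_size: int = 2) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # 226 for the defaults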
| 3 | 0 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of the first n terms of an arithmetic progression."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
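
# Worked check (illustrative): 1 + 2 + ... + 10 is (10 / 2) * (2 * 1 + 9 * 1) = 55.0,
# and for first_term=1, common_diff=10, num_of_terms=100 the sum is 49600.0.
def _check_examples() -> None:
    assert sum_of_series(1, 1, 10) == 55.0
    assert sum_of_series(1, 10, 100) == 49600.0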
| 70 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = 'generated'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.')
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input lengths."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input')
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if 'token_type_ids' in inputs:
            del inputs['token_type_ids']
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == 'pt':
            in_b, input_length = model_inputs['input_ids'].shape
        elif self.framework == 'tf':
            in_b, input_length = tf.shape(model_inputs['input_ids']).numpy()
        generate_kwargs['min_length'] = generate_kwargs.get('min_length', self.model.config.min_length)
        generate_kwargs['max_length'] = generate_kwargs.get('max_length', self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs['min_length'], generate_kwargs['max_length'])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == 'pt':
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == 'tf':
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {'output_ids': output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs['output_ids'][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = 'summary'
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
if max_length < min_length:
logger.warning(F'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = 'translation'
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, '_build_translation_inputs', None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params['src_lang'] = src_lang
        if tgt_lang is not None:
            preprocess_params['tgt_lang'] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('task', self.task)
            items = task.split('_')
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params['src_lang'] = items[1]
                preprocess_params['tgt_lang'] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
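
# Hedged usage sketch (illustrative; the checkpoint name is an assumption and any
# seq2seq model works). These task pipelines are normally built via `pipeline`,
# which downloads the model on first use:
def _demo_summarization() -> None:
    from transformers import pipeline
    summarizer = pipeline('summarization', model='t5-small')
    print(summarizer('The tower is 324 metres tall, about the same height as an 81-storey building.', max_length=20))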
| 3 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
def __init__( self ,_snake_case ,_snake_case=99 ,_snake_case=13 ,_snake_case=7 ,_snake_case=9 ,_snake_case=True ,_snake_case=True ,_snake_case=False ,_snake_case=32 ,_snake_case=5 ,_snake_case=4 ,_snake_case=37 ,_snake_case=8 ,_snake_case=0.1 ,_snake_case=0.002 ,_snake_case=1 ,_snake_case=0 ,_snake_case=0 ,_snake_case=None ,_snake_case=None ,):
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : int = encoder_seq_length
UpperCAmelCase_ : int = decoder_seq_length
# For common tests
UpperCAmelCase_ : Any = self.decoder_seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : List[Any] = use_attention_mask
UpperCAmelCase_ : List[str] = use_labels
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Any = d_ff
UpperCAmelCase_ : List[Any] = relative_attention_num_buckets
UpperCAmelCase_ : str = dropout_rate
UpperCAmelCase_ : Optional[int] = initializer_factor
UpperCAmelCase_ : Union[str, Any] = eos_token_id
UpperCAmelCase_ : int = pad_token_id
UpperCAmelCase_ : List[str] = decoder_start_token_id
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = decoder_layers
def UpperCamelCase__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,):
if attention_mask is None:
UpperCAmelCase_ : Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCAmelCase_ : int = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCAmelCase_ : List[str] = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=_snake_case )
if decoder_head_mask is None:
UpperCAmelCase_ : str = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=_snake_case )
if cross_attn_head_mask is None:
UpperCAmelCase_ : Optional[Any] = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=_snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCAmelCase_ : Tuple = input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase_ : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase_ : Any = self.get_config()
UpperCAmelCase_ : Union[str, Any] = config.num_attention_heads
UpperCAmelCase_ : List[Any] = self.prepare_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, input_dict
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase__ ( self ):
return TaConfig(
vocab_size=1_66 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def UpperCamelCase__ ( self ):
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,):
UpperCAmelCase_ : int = UMTaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
input_ids=_snake_case ,decoder_input_ids=_snake_case ,attention_mask=_snake_case ,decoder_attention_mask=_snake_case ,)
UpperCAmelCase_ : str = model(input_ids=_snake_case ,decoder_input_ids=_snake_case )
UpperCAmelCase_ : Union[str, Any] = result.last_hidden_state
UpperCAmelCase_ : List[Any] = result.past_key_values
UpperCAmelCase_ : Any = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_snake_case ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,):
UpperCAmelCase_ : Dict = UMTaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval()
# first forward pass
UpperCAmelCase_ : Dict = model(_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ : int = model(_snake_case )
UpperCAmelCase_ : Optional[int] = model(_snake_case ,use_cache=_snake_case )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Dict = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
UpperCAmelCase_ : Dict = model(_snake_case )["last_hidden_state"]
UpperCAmelCase_ : Dict = model(_snake_case ,past_key_values=_snake_case )["last_hidden_state"]
# select random slice
UpperCAmelCase_ : str = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,):
UpperCAmelCase_ : Dict = UMTaModel(config=_snake_case ).to(_snake_case ).half().eval()
UpperCAmelCase_ : List[str] = model(**_snake_case )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_snake_case ).any().item() )
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : Union[str, Any] = UMTaModel(config_and_inputs[0] ).to(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_snake_case ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,f'''{tmpdirname}/t5_test.onnx''' ,export_params=_snake_case ,opset_version=9 ,input_names=["input_ids", "decoder_input_ids"] ,)
@unittest.skipIf(torch_device == "cpu" ,"Cant do half precision" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_snake_case )
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def UpperCamelCase__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case (unittest.TestCase):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" ,return_dict=_snake_case ).to(_snake_case )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("google/umt5-small" ,use_fast=_snake_case ,legacy=_snake_case )
UpperCAmelCase_ : Optional[int] = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
UpperCAmelCase_ : Tuple = tokenizer(_snake_case ,return_tensors="pt" ,padding=_snake_case ).input_ids
# fmt: off
UpperCAmelCase_ : Any = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_snake_case ,_snake_case )
UpperCAmelCase_ : int = model.generate(input_ids.to(_snake_case ) )
UpperCAmelCase_ : int = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
UpperCAmelCase_ : Dict = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case ,_snake_case )
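
# Illustrative note (not in the original tests): `clamp(pad_token_id + 1)` in the
# tester above guarantees no pad tokens appear inside the random sequences.
def _demo_clamp() -> None:
    ids = torch.tensor([[0, 3, 0, 7]])
    assert (ids.clamp(1) >= 1).all()  # pad_token_id is 0 in the tester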
| 71 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
    def test_kwargs_handler(self):
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=A_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
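
# Illustrative aside (not part of the original file): `to_kwargs` only reports
# fields that differ from the dataclass defaults, which is why MockClass() maps
# to an empty dict in the test above.
def _demo_to_kwargs() -> None:
    assert MockClass().to_kwargs() == {}
    assert MockClass(a=2, c=2.25).to_kwargs() == {'a': 2, 'c': 2.25}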
| 3 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
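
# Illustrative note (not part of the original file): with `_LazyModule` in place,
# the torch-backed symbol is only resolved on first attribute access, e.g.
#
#   from transformers.models.timm_backbone import TimmBackboneConfig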
| 72 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , )-> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def UpperCAmelCase_ ( self , A_ , A_ )-> torch.FloatTensor:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
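
# Standalone sketch (illustrative) of the rule implemented by `truncate` above:
# keep log-probabilities while the cumulative probability of the sorted values
# stays below `truncation_rate`; the method additionally always keeps the argmax.
def _demo_truncation_mask() -> None:
    probs = torch.tensor([[0.6, 0.3, 0.1]])
    sorted_p, _ = torch.sort(probs, 1, descending=True)
    keep = sorted_p.cumsum(dim=1) < 0.8
    print(keep)  # tensor([[ True, False, False]])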
| 3 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(F'''{class_data_dir}/images''', exist_ok=True)
    if len(list(Path(F'''{class_data_dir}/images''').iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)
    with open(F'''{class_data_dir}/caption.txt''', 'w') as fa, open(F'''{class_data_dir}/urls.txt''', 'w') as fb, open(
        F'''{class_data_dir}/images.txt''', 'w') as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(F'''{class_data_dir}/images/{total}.jpg''', 'wb') as f:
                        f.write(img.content)
                    fa.write(images['caption'] + '\n')
                    fb.write(images['url'] + '\n')
                    fc.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
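# Example invocation (hypothetical script name and paths; requires the
# `clip-retrieval` package used by the imports above):
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200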
| 73 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply two integers via repeated doubling (Russian peasant multiplication)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c, reducing modulo c at every step to keep intermediates small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
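# Minimal sanity check for the two helpers above (plain asserts, no framework assumed).
if __name__ == "__main__":
    assert binary_multiply(13, 7) == 91
    assert binary_mod_multiply(13, 7, 5) == (13 * 7) % 5  # == 1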
| 74 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ = None , A_ = None , A_=None , A_=None )-> Optional[Any]:
'''simple docstring'''
if not conversation_id:
            UpperCamelCase = uuid.uuid4()
if past_user_inputs is None:
UpperCamelCase = []
if generated_responses is None:
UpperCamelCase = []
UpperCamelCase = conversation_id
UpperCamelCase = past_user_inputs
UpperCamelCase = generated_responses
UpperCamelCase = text
def __eq__( self , A_ )-> List[Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self , A_ , A_ = False )-> int:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
UpperCamelCase = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
UpperCamelCase = text
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
self.generated_responses.append(A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self )-> Any:
'''simple docstring'''
UpperCamelCase = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
UpperCamelCase = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
snake_case_ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
UpperCamelCase = self.tokenizer.eos_token
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
if min_length_for_response is not None:
UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
UpperCamelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self , A_ , A_=32 )-> Dict[str, Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
if conversation.new_user_input is None:
raise ValueError(
                F'''Conversation with UUID {conversation.uuid} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCamelCase = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
UpperCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=10 , **A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
UpperCamelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
UpperCamelCase = max_length - minimum_tokens
UpperCamelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
UpperCamelCase = model_inputs['attention_mask'][:, -trim:]
UpperCamelCase = model_inputs.pop('conversation' )
UpperCamelCase = max_length
UpperCamelCase = self.model.generate(**A_ , **A_ )
if self.model.config.is_encoder_decoder:
UpperCamelCase = 1
else:
UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=True )-> Tuple:
'''simple docstring'''
UpperCamelCase = model_outputs['output_ids']
UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
UpperCamelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = self.tokenizer.eos_token_id
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
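# Hedged usage sketch (standard transformers conversational API; the model is
# downloaded on first use, so this is left as a comment rather than live code):
#   from transformers import Conversation, pipeline
#   chatbot = pipeline("conversational")
#   conversation = Conversation("Hi, can you recommend a movie?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])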
| 3 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
def __init__( self : Tuple , _A : Any , _A : List[str]=13 , _A : Optional[int]=[30, 30] , _A : List[str]=2 , _A : Union[str, Any]=3 , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Tuple=32 , _A : Optional[Any]=5 , _A : List[Any]=4 , _A : Any=37 , _A : List[str]="gelu" , _A : Tuple=0.1 , _A : str=0.1 , _A : Tuple=10 , _A : List[Any]=0.0_2 , _A : Any=3 , _A : Optional[int]=None , _A : Tuple=8 , _A : Optional[Any]=10 , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Optional[int] = use_labels
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : str = num_labels
UpperCAmelCase__ : List[str] = scope
UpperCAmelCase__ : Union[str, Any] = n_targets
UpperCAmelCase__ : int = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
UpperCAmelCase__ : Any = (image_size[1] // patch_size) * (image_size[0] // patch_size)
UpperCAmelCase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCAmelCase__ : List[Any] = []
for i in range(self.batch_size ):
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : Optional[Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_A )
UpperCAmelCase__ : Union[str, Any] = torch.rand(self.n_targets , 4 , device=_A )
labels.append(_A )
UpperCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowercase_ ( self : Union[str, Any] , _A : int , _A : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = YolosModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : Dict , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = YolosForObjectDetection(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(pixel_values=_A )
UpperCAmelCase__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
UpperCAmelCase__ : Dict = model(pixel_values=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : Any=False ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
UpperCAmelCase__ : int = []
for i in range(self.model_tester.batch_size ):
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : str = torch.ones(
size=(self.model_tester.n_targets,) , device=_A , dtype=torch.long )
UpperCAmelCase__ : str = torch.ones(
self.model_tester.n_targets , 4 , device=_A , dtype=torch.float )
labels.append(_A )
UpperCAmelCase__ : str = labels
return inputs_dict
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = YolosModelTester(self )
UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(_A )
UpperCAmelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[Any] = True
# in YOLOS, the seq_len is different
UpperCAmelCase__ : Union[str, Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Any = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Any = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : str = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
UpperCAmelCase__ : str = len(_A )
# Check attention is always last and order is fine
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Optional[int] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Any = 1
self.assertEqual(out_len + added_hidden_states , len(_A ) )
UpperCAmelCase__ : List[str] = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
def check_hidden_states_output(_A : Optional[int] , _A : int , _A : List[str] ):
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : str = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Optional[int] = outputs.hidden_states
UpperCAmelCase__ : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
# YOLOS has a different seq_length
UpperCAmelCase__ : Optional[Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Any = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : str = YolosModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Dict:
UpperCAmelCase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(_A )
UpperCAmelCase__ : Optional[Any] = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(inputs.pixel_values )
# verify outputs
UpperCAmelCase__ : Any = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Dict = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_A , )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _A , atol=1e-4 ) )
# verify postprocessing
UpperCAmelCase__ : Any = image_processor.post_process_object_detection(
_A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
UpperCAmelCase__ : str = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(_A )
UpperCAmelCase__ : Any = [75, 75, 17, 63, 17]
UpperCAmelCase__ : int = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(_A )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , _A , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , _A )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , _A ) )
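# Hedged usage sketch mirroring the integration test above (weights download on first use):
#   from PIL import Image
#   from transformers import AutoImageProcessor, YolosForObjectDetection
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
#   model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   results = processor.post_process_object_detection(
#       outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]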
| 75 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(1_00_00):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 3 | 0 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
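# Hedged usage sketch: these re-exports are typically consumed like
#   from transformers.data.processors import glue_processors
#   processor = glue_processors["mrpc"]()
#   examples = processor.get_dev_examples("./glue_data/MRPC")  # hypothetical data dir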
| 76 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, ya: float, xa: float, h: float, x_end: float):
    """Integrate y' = f(x, y) from xa to x_end with step h using the classic fourth-order Runge-Kutta method."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
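# Hedged usage sketch: integrating dy/dx = y from x = 0 to x = 1 with y(0) = 1
# should approximate e ~= 2.71828 at the last grid point.
#   y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(y[-1])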
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load the base model and copy the S3PRL sequence-classification head weights into it."""
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load the base model and copy the S3PRL diarization head weights into it."""
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load the base model and copy the S3PRL x-vector head weights into it."""
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL checkpoint into a saved transformers model plus feature extractor."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
A = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
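# Example invocation (hypothetical script name and local paths):
#   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model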
| 77 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True})
lowerCAmelCase_ = Features({"""text""": Value("""string""")})
lowerCAmelCase_ = Features({})
lowerCAmelCase_ = "text"
@property
def UpperCAmelCase_ ( self )-> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"}
| 3 | 0 |
'''simple docstring'''
def sylvester(number: int) -> int:
    """Return the n-th term of Sylvester's sequence (1-indexed)."""
    assert isinstance(number, int), f"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
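# The sequence starts 2, 3, 7, 43, 1807, ...: each term is the product of all
# previous terms plus one, which the recurrence (num - 1) * num + 1 reproduces.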
if __name__ == "__main__":
print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 78 |
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
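    # Worked example: next_greatest_element([2, 7, 3, 5, 4, 6, 8]) returns
    # [7, 8, 5, 6, 6, 8, -1] -- each slot holds the first larger value to its right.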
| 3 | 0 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series given its first term, common difference and length."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
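# Worked example: sum_of_series(1, 1, 10) evaluates the closed form
# (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0, the 10th triangular number.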
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence, leaving the rest untouched."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
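# Hedged usage sketch: zip(ascii_lowercase, ascii_uppercase) builds the
# lowercase-to-uppercase map, so capitalize("hello world") returns "Hello world"
# and capitalize("123 abc") comes back unchanged.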
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__UpperCamelCase : Tuple = """hf-internal-testing/tiny-random-bert"""
__UpperCamelCase : str = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
__UpperCamelCase : Optional[Any] = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = cached_file(_lowerCAmelCase , _lowerCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_lowerCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) ) )
with open(os.path.join(_lowerCAmelCase , """refs""" , """main""" ) ) as f:
__lowercase = f.read()
self.assertEqual(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """snapshots""" , _lowerCAmelCase , _lowerCAmelCase ) )
self.assertTrue(os.path.isfile(_lowerCAmelCase ) )
# File is cached at the same place the second time.
__lowercase = cached_file(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Using a specific revision to test the full commit hash.
__lowercase = cached_file(_lowerCAmelCase , _lowerCAmelCase , revision="""9b8c223""" )
self.assertEqual(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """snapshots""" , _lowerCAmelCase , _lowerCAmelCase ) )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(_lowerCAmelCase , """is not a valid model identifier""" ):
__lowercase = cached_file("""tiny-random-bert""" , _lowerCAmelCase )
with self.assertRaisesRegex(_lowerCAmelCase , """is not a valid git identifier""" ):
__lowercase = cached_file(_lowerCAmelCase , _lowerCAmelCase , revision="""aaaa""" )
with self.assertRaisesRegex(_lowerCAmelCase , """does not appear to have a file named""" ):
__lowercase = cached_file(_lowerCAmelCase , """conf""" )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(_lowerCAmelCase , """does not appear to have a file named""" ):
__lowercase = cached_file(_lowerCAmelCase , """conf""" )
with open(os.path.join(_lowerCAmelCase , """refs""" , """main""" ) ) as f:
__lowercase = f.read()
self.assertTrue(os.path.isfile(os.path.join(_lowerCAmelCase , """.no_exist""" , _lowerCAmelCase , """conf""" ) ) )
__lowercase = cached_file(_lowerCAmelCase , """conf""" , _raise_exceptions_for_missing_entries=_lowerCAmelCase )
self.assertIsNone(_lowerCAmelCase )
__lowercase = cached_file(_lowerCAmelCase , """conf""" , local_files_only=_lowerCAmelCase , _raise_exceptions_for_missing_entries=_lowerCAmelCase )
self.assertIsNone(_lowerCAmelCase )
__lowercase = mock.Mock()
__lowercase = 500
__lowercase = {}
__lowercase = HTTPError
__lowercase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=_lowerCAmelCase ) as mock_head:
__lowercase = cached_file(_lowerCAmelCase , """conf""" , _raise_exceptions_for_connection_errors=_lowerCAmelCase )
self.assertIsNone(_lowerCAmelCase )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _lowerCAmelCase ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _lowerCAmelCase ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _lowerCAmelCase ) )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_lowerCAmelCase , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , _lowerCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_lowerCAmelCase , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , _lowerCAmelCase , revision="""ahaha""" )
__lowercase = get_file_from_repo("""bert-base-cased""" , _lowerCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
__lowercase = json.loads(open(_lowerCAmelCase , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = Path(_lowerCAmelCase ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(_lowerCAmelCase , """a.txt""" ) , str(_lowerCAmelCase ) )
self.assertIsNone(get_file_from_repo(_lowerCAmelCase , """b.txt""" ) )
| 80 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : Dict = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : str = 'RegNetConfig'
# Base docstring
lowerCAmelCase : str = 'facebook/regnet-y-040'
lowerCAmelCase : Dict = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Dict = 'facebook/regnet-y-040'
lowerCAmelCase : int = 'tabby, tabby cat'
lowerCAmelCase : int = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.convolution(self.padding(A_ ) )
UpperCamelCase = self.normalization(A_ )
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config.num_channels
UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) )
UpperCamelCase = self.embedder(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(A_ ) , training=A_ )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
UpperCamelCase = [
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.pooler(A_ )
for layer_module in self.attention:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(A_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
lowerCAmelCase_ = RegNetConfig
def __init__( self , A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config
UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' )
UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(A_ , training=A_ )
UpperCamelCase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
        # Change to NCHW output format to have uniformity in the modules
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = RegNetConfig
lowerCAmelCase_ = """regnet"""
lowerCAmelCase_ = """pixel_values"""
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : str = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase : List[str] = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
# classification head
UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier[0](A_ )
UpperCamelCase = self.classifier[1](A_ )
UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
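# Hedged usage sketch (standard transformers TF API; weights download on first use):
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   predicted_label = int(tf.math.argmax(model(**inputs).logits, axis=-1))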
| 3 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` places (0 keeps every digit)."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
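# Hedged behavior note: each call strips the integer part first, so
# decimal_isolate(35.345, 1) == 0.3 and decimal_isolate(-14.789, 3) == -0.789
# (both subject to ordinary binary floating-point rounding).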
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 81 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """perceiver"""
def __init__( self , A_=256 , A_=1280 , A_=768 , A_=1 , A_=26 , A_=8 , A_=8 , A_=None , A_=None , A_="kv" , A_=1 , A_=1 , A_="gelu" , A_=0.1 , A_=0.02 , A_=1e-12 , A_=True , A_=262 , A_=2048 , A_=56 , A_=[368, 496] , A_=16 , A_=1920 , A_=16 , A_=[1, 16, 224, 224] , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = num_latents
UpperCamelCase = d_latents
UpperCamelCase = d_model
UpperCamelCase = num_blocks
UpperCamelCase = num_self_attends_per_block
UpperCamelCase = num_self_attention_heads
UpperCamelCase = num_cross_attention_heads
UpperCamelCase = qk_channels
UpperCamelCase = v_channels
UpperCamelCase = cross_attention_shape_for_attention
UpperCamelCase = self_attention_widening_factor
UpperCamelCase = cross_attention_widening_factor
UpperCamelCase = hidden_act
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_query_residual
# masked language modeling attributes
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
# image classification attributes
UpperCamelCase = image_size
# flow attributes
UpperCamelCase = train_size
# multimodal autoencoding attributes
UpperCamelCase = num_frames
UpperCamelCase = audio_samples_per_frame
UpperCamelCase = samples_per_patch
UpperCamelCase = output_shape
class SCREAMING_SNAKE_CASE__ ( snake_case_):
@property
def UpperCAmelCase_ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase_ ( self )-> float:
'''simple docstring'''
return 1e-4
def UpperCAmelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = -1 , A_ = False , A_ = None , A_ = 3 , A_ = 40 , A_ = 40 , )-> Mapping[str, Any]:
'''simple docstring'''
if isinstance(A_ , A_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase = preprocessor.num_special_tokens_to_add(A_ )
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase = [' '.join(['a'] ) * seq_length] * batch_size
UpperCamelCase = dict(preprocessor(A_ , return_tensors=A_ ) )
UpperCamelCase = inputs.pop('input_ids' )
return inputs
elif isinstance(A_ , A_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(A_ , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCamelCase = self._generate_dummy_images(A_ , A_ , A_ , A_ )
UpperCamelCase = dict(preprocessor(images=A_ , return_tensors=A_ ) )
UpperCamelCase = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 3 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
    )
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
    args = parser.parse_args()
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 82 |
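# A stand-alone version of the counting loop above, runnable without the CLI
# wiring; function and variable names here are illustrative.
from collections import Counter
def token_counts(sequences, vocab_size):
    counter = Counter()
    for token_ids in sequences:
        counter.update(token_ids)
    # Densify the counter into a vocab-sized list of occurrence counts.
    counts = [0] * vocab_size
    for token_id, count in counter.items():
        counts[token_id] = count
    return counts
print(token_counts([[0, 1, 1], [2, 1]], vocab_size=4))  # [1, 3, 1, 0]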
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """mctct"""
def __init__( self , A_=8065 , A_=1536 , A_=36 , A_=6144 , A_=4 , A_=384 , A_=920 , A_=1e-5 , A_=0.3 , A_="relu" , A_=0.02 , A_=0.3 , A_=0.3 , A_=1 , A_=0 , A_=2 , A_=1 , A_=0.3 , A_=1 , A_=(7,) , A_=(3,) , A_=80 , A_=1 , A_=None , A_="sum" , A_=False , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = num_attention_heads
UpperCamelCase = attention_head_dim
UpperCamelCase = max_position_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = layerdrop
UpperCamelCase = hidden_act
UpperCamelCase = initializer_range
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = conv_glu_dim
UpperCamelCase = conv_dropout
UpperCamelCase = num_conv_layers
UpperCamelCase = input_feat_per_channel
UpperCamelCase = input_channels
UpperCamelCase = conv_channels
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
        # prevents the config test from failing when exporting to JSON
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 3 | 0 |
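# A minimal sketch of the kernel/layer-count validation above, using a plain
# dataclass instead of PretrainedConfig; the field names mirror the config but
# the class name is an illustrative assumption.
from dataclasses import dataclass
@dataclass
class ConvModuleConfigSketch:
    num_conv_layers: int = 1
    conv_kernel: tuple = (7,)
    def __post_init__(self):
        # Lists export to JSON cleanly, and each conv layer needs its own kernel.
        self.conv_kernel = list(self.conv_kernel)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                f'len(conv_kernel) = {len(self.conv_kernel)} must equal '
                f'num_conv_layers = {self.num_conv_layers}')
ConvModuleConfigSketch(num_conv_layers=2, conv_kernel=(7, 3))  # passes validation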
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Optional[Any] = StableDiffusionXLImgaImgPipeline
snake_case__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
snake_case__ : int = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase : Dict = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase : Dict = CLIPTextModel(__lowerCAmelCase )
_lowerCamelCase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowerCAmelCase )
_lowerCamelCase : List[str] = CLIPTextModelWithProjection(__lowerCAmelCase )
_lowerCamelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=0 ):
"""simple docstring"""
_lowerCamelCase : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : str = image / 2 + 0.5
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : Any = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : str = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : int = self.get_dummy_components()
_lowerCamelCase : Any = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : List[Any] = sd_pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : Dict = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = sd_pipe.to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# forward without prompt embeds
_lowerCamelCase : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : str = 3 * ['''this is a negative prompt''']
_lowerCamelCase : Dict = negative_prompt
_lowerCamelCase : int = 3 * [inputs['''prompt''']]
_lowerCamelCase : Dict = sd_pipe(**__lowerCAmelCase )
_lowerCamelCase : Tuple = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase : List[str] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : str = 3 * ['''this is a negative prompt''']
_lowerCamelCase : int = 3 * [inputs.pop('''prompt''' )]
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sd_pipe.encode_prompt(__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = sd_pipe(
**__lowerCAmelCase , prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , pooled_prompt_embeds=__lowerCAmelCase , negative_pooled_prompt_embeds=__lowerCAmelCase , )
_lowerCamelCase : str = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple="cpu" , __lowerCAmelCase : Tuple=torch.floataa , __lowerCAmelCase : Dict=0 ):
"""simple docstring"""
_lowerCamelCase : Tuple = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase : List[Any] = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.get_inputs(__lowerCAmelCase )
_lowerCamelCase : int = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Optional[int] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 83 |
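# The device-aware seeding pattern that the tests above repeat, factored into
# a helper; the function name is an illustrative assumption.
import torch
def make_generator(device, seed=0):
    if str(device).startswith('mps'):
        # MPS historically lacked device-local generators; seed the global CPU RNG.
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)
print(torch.randn(2, generator=make_generator('cpu')))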
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 3 | 0 |
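# A tiny sketch of the lazy-import pattern this __init__ relies on: attribute
# access triggers the real import. This is a simplification for illustration,
# not transformers' actual _LazyModule implementation.
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols}
    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        submodule = importlib.import_module(self._symbol_to_module[name])
        return getattr(submodule, name)
lazy = LazyModuleSketch('demo', {'json': ['loads', 'dumps']})
print(lazy.loads('[1, 2]'))  # the json module is only imported on this access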
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 84 |
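# Spot checks for the mobile-number pattern above; the expected values follow
# directly from the regex alternatives (prefix, 07x group, 7 trailing digits).
for number, expected in [
    ('0094702343221', True),   # 0094 prefix + 70 + 7 digits
    ('+94767283261', True),    # +94 prefix + 76 + 7 digits
    ('0112343221', False),     # landline prefix, not an 07x mobile number
]:
    assert is_sri_lankan_phone_number(number) is expected, number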
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester( unittest.TestCase):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , )-> Dict:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase):
lowerCAmelCase_ = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@is_flaky()
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 0 |
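# A sketch of the size normalization the test above exercises: legacy
# (width, height) tuples become the newer {"height", "width"} dict. The helper
# name is an illustrative assumption, not the image processor's internal API.
def normalize_size(size):
    if isinstance(size, dict):
        return size
    if isinstance(size, (tuple, list)) and len(size) == 2:
        width, height = size  # the previous config order was (width, height)
        return {'height': height, 'width': width}
    if isinstance(size, int):
        return {'height': size, 'width': size}
    raise ValueError(f'unsupported size specification: {size!r}')
assert normalize_size((42, 84)) == {'height': 84, 'width': 42}
assert normalize_size(42) == {'height': 42, 'width': 42}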
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class snake_case ( UpperCamelCase_ ):
lowercase_ = (DPMSolverSDEScheduler,)
lowercase_ = 10
def __lowercase( self : Any , **a_ : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = {
'num_train_timesteps': 1100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a_ )
return config
def __lowercase( self : int )-> str:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a_ , beta_end=a_ )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a_ )
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def __lowercase( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : str = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[int] = sample.to(a_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.scale_model_input(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.step(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(a_ ) )
SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(a_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE__ : List[Any] = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[int] = sample.to(a_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : List[Any] = scheduler.scale_model_input(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Any = model(a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(a_ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.mean(torch.abs(a_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def __lowercase( self : str )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps , device=a_ )
SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter.to(a_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.scale_model_input(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Any = scheduler.step(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(a_ ) )
SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(a_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def __lowercase( self : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_class(**a_ , use_karras_sigmas=a_ )
scheduler.set_timesteps(self.num_inference_steps , device=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter.to(a_ ) * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[int] = sample.to(a_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.scale_model_input(a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = model(a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = scheduler.step(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(a_ ) )
SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(a_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 85 |
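# The denoising skeleton that every scheduler test above repeats, factored
# out for reference; `model` and `scheduler` are stand-ins for whatever pair
# is under test, so this is a sketch rather than a runnable pipeline.
def denoise(model, scheduler, sample, num_inference_steps=10):
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        # step() returns a scheduler output whose prev_sample is the x_{t-1} estimate
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample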
'''simple docstring'''
def permute(nums: list[int]):
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permutea(nums: list[int]):
    def backtrack(start: int):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
    # print the permutations produced by the backtracking version (permutea)
    res = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 3 | 0 |
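# Cross-check of the two strategies above against the standard library; both
# must produce the same multiset of permutations.
import itertools
assert sorted(permute([1, 2, 3])) == sorted(map(list, itertools.permutations([1, 2, 3])))
assert sorted(permutea([1, 2, 3])) == sorted(map(list, itertools.permutations([1, 2, 3])))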
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
    plt.show()
| 86 |
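# The same elementwise fuzzy operators written directly in NumPy; on a shared
# universe, skfuzzy's fuzzy_or/fuzzy_and reduce to pointwise max/min. The
# two small membership arrays are illustrative stand-ins.
import numpy as np
young_m = np.array([1.0, 0.5, 0.0])
middle_aged_m = np.array([0.0, 0.5, 1.0])
print(np.maximum(young_m, middle_aged_m))        # union
print(np.minimum(young_m, middle_aged_m))        # intersection
print(1 - young_m)                               # complement
print(np.minimum(young_m, 1 - middle_aged_m))    # difference A/B
print(young_m + middle_aged_m - young_m * middle_aged_m)  # algebraic sum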
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int):
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for every complex number whose absolute value
        # grows beyond 2, i.e. once a * a + b * b > 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ):
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 3 | 0 |
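# An escape-time iteration equivalent in spirit to get_distance above, using
# Python's complex type; it is seeded at z = 0 rather than z = c, so step
# counts shift by one, and the |z| > 2 bail-out matches a * a + b * b > 4.
def mandelbrot_distance(c, max_step=50):
    z = 0j
    step = 0
    for step in range(max_step):
        z = z * z + c
        if abs(z) > 2:  # guaranteed divergence beyond radius 2
            break
    return step / (max_step - 1)
print(mandelbrot_distance(0j))      # 1.0 -> stays bounded, inside the set
print(mandelbrot_distance(1 + 1j))  # near 0.0 -> diverges almost immediately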
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowerCamelCase : Optional[Any] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_lowerCamelCase : Union[str, Any] = get_tests_dir("""fixtures/vocab.json""")
_lowerCamelCase : List[Any] = get_tests_dir("""fixtures""")
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
A__ = 0
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
'''simple docstring'''
A__ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''')
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = WavaVecaConfig()
A__ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''')
# save in new folder
model_config.save_pretrained(UpperCAmelCase__)
processor.save_pretrained(UpperCAmelCase__)
A__ = AutoProcessor.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , UpperCAmelCase__))
copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , '''vocab.json'''))
A__ = AutoProcessor.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = WavaVecaFeatureExtractor()
A__ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''')
A__ = WavaVecaProcessor(UpperCAmelCase__ , UpperCAmelCase__)
# save in new folder
processor.save_pretrained(UpperCAmelCase__)
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''r''') as f:
A__ = json.load(UpperCAmelCase__)
config_dict.pop('''processor_class''')
with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''w''') as f:
f.write(json.dumps(UpperCAmelCase__))
A__ = AutoProcessor.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = WavaVecaFeatureExtractor()
A__ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''')
A__ = WavaVecaProcessor(UpperCAmelCase__ , UpperCAmelCase__)
# save in new folder
processor.save_pretrained(UpperCAmelCase__)
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''r''') as f:
A__ = json.load(UpperCAmelCase__)
config_dict.pop('''processor_class''')
with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''w''') as f:
f.write(json.dumps(UpperCAmelCase__))
A__ = AutoProcessor.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = WavaVecaConfig(processor_class='''Wav2Vec2Processor''')
model_config.save_pretrained(UpperCAmelCase__)
# copy relevant files
copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , '''vocab.json'''))
            # create empty sample processor
with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''w''') as f:
f.write('''{}''')
A__ = AutoProcessor.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase__):
A__ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase__):
A__ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__)
A__ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__)
self.assertTrue(processor.special_attribute_present)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
A__ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
A__ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
A__ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ , use_fast=UpperCAmelCase__)
A__ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present)
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''')
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase__)
AutoFeatureExtractor.register(UpperCAmelCase__ , UpperCAmelCase__)
AutoTokenizer.register(UpperCAmelCase__ , slow_tokenizer_class=UpperCAmelCase__)
AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase__):
AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__)
# Now that the config is registered, it can be used as any other config with the auto-API
A__ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = os.path.join(UpperCAmelCase__ , '''vocab.txt''')
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
A__ = CustomTokenizer(UpperCAmelCase__)
A__ = CustomProcessor(UpperCAmelCase__ , UpperCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCAmelCase__)
A__ = AutoProcessor.from_pretrained(UpperCAmelCase__)
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple:
'''simple docstring'''
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = False
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = False
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''AutoFeatureExtractor'''
UpperCAmelCase__ = '''AutoTokenizer'''
UpperCAmelCase__ = False
try:
AutoConfig.register('''custom''' , UpperCAmelCase__)
AutoFeatureExtractor.register(UpperCAmelCase__ , UpperCAmelCase__)
AutoTokenizer.register(UpperCAmelCase__ , slow_tokenizer_class=UpperCAmelCase__)
AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__)
# If remote code is not set, the default is to use local classes.
A__ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''')
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertFalse(processor.special_attribute_present)
self.assertFalse(processor.feature_extractor.special_attribute_present)
self.assertFalse(processor.tokenizer.special_attribute_present)
# If remote code is disabled, we load the local ones.
A__ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertFalse(processor.special_attribute_present)
self.assertFalse(processor.feature_extractor.special_attribute_present)
self.assertFalse(processor.tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub.
A__ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertTrue(processor.special_attribute_present)
self.assertTrue(processor.feature_extractor.special_attribute_present)
self.assertTrue(processor.tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
A__ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''')
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''')
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''')
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''')
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any) ->List[Any]:
'''simple docstring'''
A__ = TOKEN
HfFolder.save_token(UpperCAmelCase__)
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any]) ->Any:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-processor''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''')
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ = WavaVecaProcessor.from_pretrained(UpperCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCAmelCase__ , '''test-processor''') , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token)
A__ = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""")
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase__ , getattr(new_processor.feature_extractor , UpperCAmelCase__))
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = WavaVecaProcessor.from_pretrained(UpperCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCAmelCase__ , '''test-processor-org''') , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''')
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase__ , getattr(new_processor.feature_extractor , UpperCAmelCase__))
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = os.path.join(UpperCAmelCase__ , '''vocab.txt''')
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
A__ = CustomTokenizer(UpperCAmelCase__)
A__ = CustomProcessor(UpperCAmelCase__ , UpperCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token)
A__ = Repository(UpperCAmelCase__ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token)
processor.save_pretrained(UpperCAmelCase__)
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCAmelCase__ , '''tokenizer_config.json''')) as f:
A__ = json.load(UpperCAmelCase__)
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_feature_extraction.py''')))
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_tokenization.py''')))
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_processing.py''')))
repo.push_to_hub()
A__ = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=UpperCAmelCase__)
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''')
| 87 |
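# The register-then-restore discipline the tests above depend on, in
# miniature: a shared registry is mutated for one test and restored in
# `finally` so state never leaks across tests. All names are illustrative.
REGISTRY_SKETCH = {}
def register_sketch(name, cls):
    if name in REGISTRY_SKETCH:
        raise ValueError(f'{name!r} is already registered')
    REGISTRY_SKETCH[name] = cls
class _CustomThing:
    pass
try:
    register_sketch('custom', _CustomThing)
    assert REGISTRY_SKETCH['custom'] is _CustomThing
finally:
    REGISTRY_SKETCH.pop('custom', None)  # never leak test state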
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , ) -> Dict:
_lowerCamelCase : int = parent
_lowerCamelCase : List[Any] = batch_size
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[Any] = is_training
_lowerCamelCase : Any = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2
_lowerCamelCase : Any = num_patches + 1
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCamelCase : int = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, pixel_values
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : Dict = FlaxViTModel(config=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : Dict = (self.image_size, self.image_size)
_lowerCamelCase : int = (self.patch_size, self.patch_size)
_lowerCamelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase : str = self.type_sequence_label_size
_lowerCamelCase : List[Any] = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : int = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Union[str, Any]:
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowercase__ ( A_ ,unittest.TestCase ):
__UpperCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCamelCase_ ( self) -> None:
_lowerCamelCase : Dict = FlaxViTModelTester(self)
_lowerCamelCase : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37)
def UpperCamelCase_ ( self) -> int:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Any:
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : int = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCamelCase : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE)
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE):
return model(pixel_values=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
with self.subTest("""JIT Enabled"""):
_lowerCamelCase : List[str] = model_jitted(**SCREAMING_SNAKE_CASE).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
_lowerCamelCase : List[Any] = model_jitted(**SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE) , len(SCREAMING_SNAKE_CASE))
for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def UpperCamelCase_ ( self) -> List[str]:
for model_class_name in self.all_model_classes:
_lowerCamelCase : Tuple = model_class_name.from_pretrained("""google/vit-base-patch16-224""")
_lowerCamelCase : List[Any] = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
| 88 |
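# The JIT parity check above, reduced to its core: run the same function
# jitted and with JIT disabled, then compare output shapes. The toy forward
# function is an illustrative stand-in for a model call.
import jax
import jax.numpy as jnp
def forward(x):
    return jnp.tanh(x) * 2.0
x = jnp.ones((2, 3))
jitted_out = jax.jit(forward)(x)
with jax.disable_jit():
    eager_out = forward(x)
assert jitted_out.shape == eager_out.shape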
'''simple docstring'''
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target):
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tabs on distances from the `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 3 | 0 |
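# The same shortest-path search with collections.deque, so each queue pop is
# O(1) instead of list.pop(0)'s O(n); behaviour is otherwise identical to
# bfs_shortest_path above.
from collections import deque
def bfs_shortest_path_deque(graph, start, goal):
    if start == goal:
        return [start]
    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        for neighbour in graph[path[-1]]:
            if neighbour == goal:
                return path + [neighbour]
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []
print(bfs_shortest_path_deque(demo_graph, 'G', 'D'))  # ['G', 'C', 'A', 'B', 'D']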
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # NB: np.int64 and use_cache=False below reconstruct garbled literals
        # ("np.intaa", a placeholder) and are the most plausible readings.
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
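        # Worked example (illustrative, derived from the assertions above): with
        # pad=1 and decoder_start=2, shift_tokens_right rolls each row one step
        # right and writes the start token at position 0:
        #   [71, 82, 18, 33, 2, 1, 1]  ->  [2, 71, 82, 18, 33, 2, 1]
        # so exactly one trailing pad token drops out of each shifted row.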
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        # from_pt=True reconstructs a placeholder left by the source dump.
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 89 |
"""Histogram equalization ("constant stretch") of a greyscale image."""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            # s_k = (L - 1) * CDF(k): the standard histogram-equalization mapping
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # NB: upstream joins on basename(__file__); switch to os.path.dirname(__file__)
    # if the relative path fails in your layout.
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
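    # Minimal self-contained sketch of the same equalization mapping on a toy array
    # (illustrative addition; `toy` and `lut` are hypothetical names, not part of
    # the class above):
    toy = np.array([[0, 64], [128, 255]], dtype=np.uint8)
    hist = np.bincount(toy.ravel(), minlength=256)
    cdf = np.cumsum(hist) / toy.size                  # cumulative distribution, like self.sk
    lut = np.rint((256 - 1) * cdf).astype(np.uint8)   # s_k = (L - 1) * CDF(k)
    print(lut[toy])                                   # equalized toy image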
| 3 | 0 |
'''Tests for the Flax RegNet models.'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 90 |
'''UniSpeechSat model configuration.'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
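
# Quick sanity check of the stride product above (illustrative addition, not part of
# the original module): the default conv_stride (5, 2, 2, 2, 2, 2, 2) multiplies out
# to 5 * 2**6 = 320, i.e. one output frame per 320 input samples.
if __name__ == "__main__":
    assert UniSpeechSatConfig().inputs_to_logits_ratio == 320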
| 3 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
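    # Note (added for clarity, values taken from the tests themselves):
    # `timesteps=[22, 0]` drives the multistep consistency sampler with an explicit
    # schedule, while the one-step tests below instead set num_inference_steps=1
    # and timesteps=None.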
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # NB: the float32 defaults below reconstruct garbled "torch.floataa" tokens and
    # are the most plausible reading.
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 91 |
'''Tests for the BEiT models.'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 3 | 0 |
'''BEiT model configuration.'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
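
# Illustrative use of the ONNX export config above (hypothetical snippet, not part
# of the original module):
#
#   onnx_config = BeitOnnxConfig(BeitConfig())
#   onnx_config.inputs               # OrderedDict with the 'pixel_values' axes above
#   onnx_config.atol_for_validation  # 1e-4, the tolerance used when validating exports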
| 92 |
'''Text2Text generation pipeline and its summarization subclass.'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """generated"""
def __init__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
super().__init__(*A_ , **A_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , **A_ , )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = {}
if truncation is not None:
UpperCamelCase = truncation
UpperCamelCase = generate_kwargs
UpperCamelCase = {}
if return_tensors is not None and return_type is None:
UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCamelCase = self.tokenizer.encode(A_ , add_special_tokens=A_ )
if len(A_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
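            # generate() treats this id as eos_token_id, so decoding halts the first
            # time the (single-token) stop sequence is produced.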
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
return True
def UpperCAmelCase_ ( self , *A_ , A_ )-> Any:
'''simple docstring'''
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , A_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
UpperCamelCase = ([prefix + arg for arg in args[0]],)
UpperCamelCase = True
elif isinstance(args[0] , A_ ):
UpperCamelCase = (prefix + args[0],)
UpperCamelCase = False
else:
raise ValueError(
                F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`.''' )
UpperCamelCase = self.tokenizer(*A_ , padding=A_ , truncation=A_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = super().__call__(*A_ , **A_ )
if (
isinstance(args[0] , A_ )
and all(isinstance(A_ , A_ ) for el in args[0] )
and all(len(A_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase_ ( self , A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self._parse_and_tokenize(A_ , truncation=A_ , **A_ )
return inputs
def UpperCAmelCase_ ( self , A_ , **A_ )-> int:
'''simple docstring'''
if self.framework == "pt":
UpperCamelCase , UpperCamelCase = model_inputs['input_ids'].shape
elif self.framework == "tf":
UpperCamelCase , UpperCamelCase = tf.shape(model_inputs['input_ids'] ).numpy()
UpperCamelCase = generate_kwargs.get('min_length' , self.model.config.min_length )
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(A_ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
UpperCamelCase = self.model.generate(**A_ , **A_ )
UpperCamelCase = output_ids.shape[0]
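        # generate() returns (batch_size * num_return_sequences, seq_len); the reshape
        # below regroups the candidates per input example as (in_b, out_b // in_b, ...).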
if self.framework == "pt":
UpperCamelCase = output_ids.reshape(A_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCamelCase = tf.reshape(A_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase_ ( self , A_ , A_=ReturnType.TEXT , A_=False )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCamelCase = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
UpperCamelCase = {
F'''{self.return_name}_text''': self.tokenizer.decode(
A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
}
records.append(A_ )
return records
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """summary"""
def __call__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> bool:
'''simple docstring'''
if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """translation"""
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCAmelCase_ ( self , *A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , A_=None , A_=None )-> Dict:
'''simple docstring'''
if getattr(self.tokenizer , '_build_translation_inputs' , A_ ):
return self.tokenizer._build_translation_inputs(
*A_ , return_tensors=self.framework , truncation=A_ , src_lang=A_ , tgt_lang=A_ )
else:
return super()._parse_and_tokenize(*A_ , truncation=A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , **A_ )-> str:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = super()._sanitize_parameters(**A_ )
if src_lang is not None:
UpperCamelCase = src_lang
if tgt_lang is not None:
UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCamelCase = kwargs.get('task' , self.task )
UpperCamelCase = task.split('_' )
if task and len(A_ ) == 4:
# translation, XX, to YY
UpperCamelCase = items[1]
UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
| 3 | 0 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int ) ->int:
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError('Parameter number must be int' )
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0' )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
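# Worked example: digit_factorial_sum(169) = 1! + 6! + 9! = 1 + 720 + 362880 = 363601.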
def solution(chain_length: int = 60 , number_limit: int = 100_0000 ) ->int:
    """simple docstring"""
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('Parameters chain_length and number_limit must be int' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
| 93 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 0
lowerCAmelCase_ = False
lowerCAmelCase_ = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=A_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
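        # GradScalerKwargs simply forwards these arguments to torch.cuda.amp.GradScaler;
        # unspecified ones keep GradScaler's defaults (checked below).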
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , A_ )
@require_multi_gpu
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 3 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None )-> Tuple:
'''simple docstring'''
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , )-> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0 to'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
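                # Classifier-free guidance in log space: move the conditional prediction
                # away from the unconditional one by a factor of guidance_scale.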
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def UpperCAmelCase_ ( self , A_ , A_ )-> torch.FloatTensor:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
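        # keep_mask flags, per sample, the most probable codebook entries whose cumulative
        # probability stays below `truncation_rate`; everything else is zeroed out below.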
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
| 3 | 0 |
"""simple docstring"""
def fizz_buzz(number: int , iterations: int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
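    # A minimal usage sketch: play FizzBuzz from 1 through 15.
    print(fizz_buzz(1, 15))  # 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz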
| 95 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
    'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
        'LxmertEncoder',
        'LxmertForPreTraining',
        'LxmertForQuestionAnswering',
        'LxmertModel',
        'LxmertPreTrainedModel',
        'LxmertVisualFeatureEncoder',
        'LxmertXLayer',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
        'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLxmertForPreTraining',
        'TFLxmertMainLayer',
        'TFLxmertModel',
        'TFLxmertPreTrainedModel',
        'TFLxmertVisualFeatureEncoder',
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ = None , A_ = None , A_=None , A_=None )-> Optional[Any]:
'''simple docstring'''
if not conversation_id:
            conversation_id = uuid.uuid4()
if past_user_inputs is None:
UpperCamelCase = []
if generated_responses is None:
UpperCamelCase = []
UpperCamelCase = conversation_id
UpperCamelCase = past_user_inputs
UpperCamelCase = generated_responses
UpperCamelCase = text
def __eq__( self , A_ )-> List[Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self , A_ , A_ = False )-> int:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
UpperCamelCase = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
UpperCamelCase = text
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
self.generated_responses.append(A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self )-> Any:
'''simple docstring'''
UpperCamelCase = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
UpperCamelCase = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
snake_case_ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
UpperCamelCase = self.tokenizer.eos_token
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
if min_length_for_response is not None:
UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
UpperCamelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self , A_ , A_=32 )-> Dict[str, Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
if conversation.new_user_input is None:
raise ValueError(
                F'''Conversation with UUID {conversation.uuid} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCamelCase = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
UpperCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=10 , **A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
UpperCamelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
UpperCamelCase = max_length - minimum_tokens
UpperCamelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
UpperCamelCase = model_inputs['attention_mask'][:, -trim:]
UpperCamelCase = model_inputs.pop('conversation' )
UpperCamelCase = max_length
UpperCamelCase = self.model.generate(**A_ , **A_ )
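        # Encoder-decoder models emit a fresh sequence (skip only the decoder start token);
        # decoder-only models echo the prompt, so the first n tokens are sliced off.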
if self.model.config.is_encoder_decoder:
UpperCamelCase = 1
else:
UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=True )-> Tuple:
'''simple docstring'''
UpperCamelCase = model_outputs['output_ids']
UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
UpperCamelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = self.tokenizer.eos_token_id
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 3 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
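    # A minimal usage sketch: the logistic sigmoid squashes any real input into (0, 1).
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # approx. [0.26894142 0.5 0.73105858]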
| 97 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(1_00_00):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 3 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def a__ ( ) -> Any:
"""simple docstring"""
_UpperCamelCase = 10
_UpperCamelCase = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
_UpperCamelCase = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
'''id''': list(range(lowercase ) ),
}, features=lowercase, )
return dataset
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Dict, lowercase : Any ) -> Dict:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : List[str] ) -> Dict:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
_UpperCamelCase = FILE_CONTENT
with open(lowercase, '''w''' ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
    import bz2
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
_UpperCamelCase = bytes(lowercase, '''utf-8''' )
    with bz2.open(lowercase, '''wb''' ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Dict ) -> Any:
"""simple docstring"""
import gzip
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
_UpperCamelCase = bytes(lowercase, '''utf-8''' )
with gzip.open(lowercase, '''wb''' ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : List[Any] ) -> List[Any]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
_UpperCamelCase = bytes(lowercase, '''utf-8''' )
        with lz4.frame.open(lowercase, '''wb''' ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : List[Any], lowercase : str ) -> Optional[int]:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(lowercase, '''w''' ) as archive:
archive.write(lowercase, arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : int, lowercase : Tuple ) -> Dict:
"""simple docstring"""
import tarfile
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(lowercase, '''w''' ) as f:
f.add(lowercase, arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : int ) -> Tuple:
"""simple docstring"""
import lzma
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
_UpperCamelCase = bytes(lowercase, '''utf-8''' )
with lzma.open(lowercase, '''wb''' ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Tuple, lowercase : List[Any] ) -> Dict:
"""simple docstring"""
import zipfile
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Dict ) -> Optional[int]:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
_UpperCamelCase = bytes(lowercase, '''utf-8''' )
with zstd.open(lowercase, '''wb''' ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Optional[int] ) -> Dict:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
_UpperCamelCase = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(lowercase, '''w''' ) as f:
f.write(lowercase )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='''session''' )
def a__ ( ) -> Optional[int]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = datasets.Dataset.from_dict(lowercase )
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''', tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(lowercase, '''w''', newline='''''' ) as f:
_UpperCamelCase = csv.DictWriter(lowercase, fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(lowercase, '''w''', newline='''''' ) as f:
_UpperCamelCase = csv.DictWriter(lowercase, fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Optional[int], lowercase : Any ) -> List[str]:
"""simple docstring"""
    import bz2
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(lowercase, '''rb''' ) as f:
_UpperCamelCase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase, '''wb''' ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any], lowercase : List[Any], lowercase : Tuple ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.basename(lowercase ) )
f.write(lowercase, arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Any, lowercase : int, lowercase : int ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.basename(csv_path.replace('''.csv''', '''.CSV''' ) ) )
f.write(lowercase, arcname=os.path.basename(csva_path.replace('''.csv''', '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Tuple, lowercase : Union[str, Any], lowercase : Any ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.join('''main_dir''', os.path.basename(lowercase ) ) )
f.write(lowercase, arcname=os.path.join('''main_dir''', os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Dict ) -> List[str]:
"""simple docstring"""
    path = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
    schema = pa.schema(
        {
            '''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
        } )
    with open(path, '''wb''' ) as f:
        writer = pq.ParquetWriter(f, schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]}, schema=schema )
        writer.write_table(pa_table )
        writer.close()
    return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : List[Any] ) -> Any:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
_UpperCamelCase = {'''data''': DATA}
with open(lowercase, '''w''' ) as f:
json.dump(lowercase, lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
_UpperCamelCase = {'''data''': DATA_DICT_OF_LISTS}
with open(lowercase, '''w''' ) as f:
json.dump(lowercase, lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Any ) -> str:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(lowercase, '''w''' ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(lowercase, '''w''' ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Optional[Any] ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(lowercase, '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : List[str] ) -> str:
"""simple docstring"""
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(lowercase, '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Tuple, lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(lowercase, '''rb''' ) as orig_file:
with gzip.open(lowercase, '''wb''' ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Tuple, lowercase : Dict ) -> Union[str, Any]:
"""simple docstring"""
import gzip
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(lowercase, '''rb''' ) as orig_file:
with gzip.open(lowercase, '''wb''' ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : List[Any], lowercase : Dict, lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.basename(lowercase ) )
f.write(lowercase, arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : int, lowercase : Union[str, Any], lowercase : List[str], lowercase : Optional[Any] ) -> int:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.join('''nested''', os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : str, lowercase : List[str], lowercase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.join('''main_dir''', os.path.basename(lowercase ) ) )
f.write(lowercase, arcname=os.path.join('''main_dir''', os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Tuple, lowercase : Optional[int], lowercase : List[str] ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(lowercase, '''w''' ) as f:
f.add(lowercase, arcname=os.path.basename(lowercase ) )
f.add(lowercase, arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any], lowercase : Tuple, lowercase : str, lowercase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(lowercase, '''w''' ) as f:
f.add(lowercase, arcname=os.path.join('''nested''', os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : List[str] ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = ['''0''', '''1''', '''2''', '''3''']
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(lowercase, '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Optional[Any] ) -> Dict:
"""simple docstring"""
_UpperCamelCase = ['''0''', '''1''', '''2''', '''3''']
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(lowercase, '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = ['''0''', '''1''', '''2''', '''3''']
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(lowercase, '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Optional[Any], lowercase : Any, lowercase : int ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.basename(lowercase ) )
f.write(lowercase, arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Tuple, lowercase : Any, lowercase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.join('''main_dir''', os.path.basename(lowercase ) ) )
f.write(lowercase, arcname=os.path.join('''main_dir''', os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any], lowercase : List[str], lowercase : Dict ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.basename('''unsupported.ext''' ) )
f.write(lowercase, arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Tuple ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
_UpperCamelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(lowercase, '''w''', encoding='''utf-8''' ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope='''session''' )
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
return os.path.join('''tests''', '''features''', '''data''', '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def a__ ( ) -> Dict:
"""simple docstring"""
return os.path.join('''tests''', '''features''', '''data''', '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : int, lowercase : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(lowercase, '''w''' ) as f:
f.write(lowercase, arcname=os.path.basename(lowercase ) )
f.write(lowercase, arcname=os.path.basename(lowercase ).replace('''.jpg''', '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def a__ ( lowercase : Union[str, Any] ) -> str:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''', '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''', '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''', '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''', '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''', '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
| 98 |
'''simple docstring'''
import numpy as np
def runge_kutta(f , ya , xa , x_end , h ):
    n = int(np.ceil((x_end - xa) / h ))
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        k1 = f(x , y[k] )
        k2 = f(x + 0.5 * h , y[k] + 0.5 * h * k1 )
        k3 = f(x + 0.5 * h , y[k] + 0.5 * h * k2 )
        k4 = f(x + h , y[k] + h * k3 )
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
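    # A minimal usage sketch: integrate dy/dx = y with y(0) = 1 over [0, 1];
    # the last entry of the returned array approximates e = 2.71828...
    print(runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)[-1])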
| 3 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
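# The two dummy classes below mimic forward() signatures with contiguous and
# non-contiguous argument layouts; they exercise ensure_valid_input's reordering logic.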
class FuncContiguousArgs:
    """simple docstring"""
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """simple docstring"""
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__A , """tf""" , 12 , **__A )
@require_torch
@slow
def snake_case_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__A , """pt""" , 12 , **__A )
@require_torch
@slow
def snake_case_ ( self ):
from transformers import BertModel
__a = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__A ) )
vocab_file.flush()
__a = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__a = BertModel(BertConfig(vocab_size=len(__A ) ) )
model.save_pretrained(__A )
self._test_export(__A , """pt""" , 12 , __A )
@require_tf
@slow
def snake_case_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__a = self._test_export(__A , """tf""" , 12 , **__A )
__a = quantize(Path(__A ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__A ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def snake_case_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__a = self._test_export(__A , """pt""" , 12 , **__A )
__a = quantize(__A )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__A ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def snake_case_ ( self , __A , __A , __A , __A=None , **__A ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
__a = Path(__A ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__A , __A , __A , __A , __A , **__A )
return path
except Exception as e:
self.fail(__A )
@require_torch
@require_tokenizers
@slow
def snake_case_ ( self ):
from transformers import BertModel
__a = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__a = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__A , __A , """pt""" )
@require_tf
@require_tokenizers
@slow
def snake_case_ ( self ):
from transformers import TFBertModel
__a = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__a = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__A , __A , """tf""" )
def snake_case_ ( self , __A , __A , __A ):
__a = FeatureExtractionPipeline(__A , __A )
__a = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
__a , __a , __a , __a = infer_shapes(__A , __A )
# Assert all variables are present
self.assertEqual(len(__A ) , len(__A ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __A )
self.assertSequenceEqual(variable_names[3:] , __A )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def snake_case_ ( self ):
__a = ["""input_ids""", """attention_mask""", """token_type_ids"""]
__a = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
__a , __a = ensure_valid_input(FuncContiguousArgs() , __A , __A )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__A ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__A ) , set(__A ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__A , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
__a , __a = ensure_valid_input(FuncNonContiguousArgs() , __A , __A )
    # Should have exactly one arg (all args before the one not provided, "some_other_args")
self.assertEqual(len(__A ) , 1 )
self.assertEqual(len(__A ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def snake_case_ ( self ):
__a = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 99 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True})
lowerCAmelCase_ = Features({"""text""": Value("""string""")})
lowerCAmelCase_ = Features({})
lowerCAmelCase_ = "text"
@property
def UpperCAmelCase_ ( self )-> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"}
| 3 | 0 |
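# The export test above writes model.onnx into a TemporaryDirectory and fails
# if quantization grows the file. A minimal standalone sketch of that size
# comparison, using only pathlib (the helper name here is illustrative, not
# part of the test suite):
from pathlib import Path

def quantization_shrank_model(original: Path, quantized: Path) -> bool:
    # A quantized graph should not be larger than the fp32 original.
    return quantized.stat().st_size < original.stat().st_size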
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(A_ ):
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
SCREAMING_SNAKE_CASE__ = FlaxAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(A_ ):
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
SCREAMING_SNAKE_CASE__ = FlaxAutoModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = FlaxBertModel.from_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**A_ ):
return model(**A_ )
eval(**A_ ).block_until_ready()
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = FlaxRobertaModel.from_pretrained(A_ )
SCREAMING_SNAKE_CASE__ = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**A_ ):
return model(**A_ )
eval(**A_ ).block_until_ready()
def lowercase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ = FlaxAutoModel.from_pretrained('''bert-base''' )
def lowercase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ = FlaxAutoModel.from_pretrained(A_ , revision='''aaaaaa''' )
def lowercase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A_ , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
SCREAMING_SNAKE_CASE__ = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(A_ , '''Use `from_pt=True` to load this model''' ):
SCREAMING_SNAKE_CASE__ = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 100 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase : Union[str, Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase : List[str] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def A_( A : list[float]):
UpperCamelCase = []
UpperCamelCase = len(A)
for i in range(A):
UpperCamelCase = -1
for j in range(i + 1 , A):
if arr[i] < arr[j]:
UpperCamelCase = arr[j]
break
result.append(A)
return result
def A_( A : list[float]):
UpperCamelCase = []
for i, outer in enumerate(A):
UpperCamelCase = -1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCamelCase = inner
break
result.append(A)
return result
def A_( A : list[float]):
UpperCamelCase = len(A)
UpperCamelCase = []
UpperCamelCase = [-1] * arr_size
for index in reversed(range(A)):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCamelCase = stack[-1]
stack.append(arr[index])
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase : Optional[Any] = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 3 | 0 |
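# The snippet above implements next-greater-element three ways: two O(n^2)
# scans and an O(n) monotonic stack; the renamed identifiers make the stack
# version hard to follow. A readable sketch of the same stack algorithm
# (function and variable names are mine, the logic mirrors the snippet):
def next_greater(arr: list[float]) -> list[float]:
    """O(n) next-greater-element using a monotonic stack.

    >>> next_greater([2, 1, 3])
    [3, 3, -1]
    """
    result = [-1] * len(arr)
    stack: list[float] = []
    for index in range(len(arr) - 1, -1, -1):
        # Drop stack entries that are not greater than the current element.
        while stack and stack[-1] <= arr[index]:
            stack.pop()
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result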
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : Any = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ : str = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Optional[int] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(lowerCAmelCase__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowerCAmelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = processor(text=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(lowerCAmelCase__ , padding='max_length' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = 'lower newer'
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.batch_decode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 101 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def A_( A : str):
if not sentence:
return ""
UpperCamelCase = dict(zip(A , A))
return lower_to_upper.get(sentence[0] , sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
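# The short helper above maps a sentence's first character through a
# lowercase-to-uppercase table built with dict(zip(...)). With the renamed
# identifiers restored it reads as follows (an equivalent sketch, same behavior):
from string import ascii_lowercase, ascii_uppercase

def capitalize(sentence: str) -> str:
    """Upper-case only the first character of the sentence.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]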
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ : Dict = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Any = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : int = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__magic_name__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : Dict = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : str = 'RegNetConfig'
# Base docstring
lowerCAmelCase : str = 'facebook/regnet-y-040'
lowerCAmelCase : Dict = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Dict = 'facebook/regnet-y-040'
lowerCAmelCase : int = 'tabby, tabby cat'
lowerCAmelCase : int = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.convolution(self.padding(A_ ) )
UpperCamelCase = self.normalization(A_ )
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config.num_channels
UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) )
UpperCamelCase = self.embedder(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(A_ ) , training=A_ )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
UpperCamelCase = [
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.pooler(A_ )
for layer_module in self.attention:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(A_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
lowerCAmelCase_ = RegNetConfig
def __init__( self , A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config
UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' )
UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(A_ , training=A_ )
UpperCamelCase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
        # Change to NCHW output format to have uniformity in the modules
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = RegNetConfig
lowerCAmelCase_ = """regnet"""
lowerCAmelCase_ = """pixel_values"""
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : str = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase : List[str] = r'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
# classification head
UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier[0](A_ )
UpperCamelCase = self.classifier[1](A_ )
UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
| 3 | 0 |
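# The TF RegNet layers above shuttle tensors between NCHW and NHWC because
# tf.keras.layers.Conv2D on CPU only supports channels-last input. A minimal
# NumPy sketch of the two permutations involved (shapes are illustrative):
import numpy as np

x_nchw = np.zeros((2, 3, 224, 224))          # (batch, channels, height, width)
x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))  # channels-last for CPU Conv2D
assert x_nhwc.shape == (2, 224, 224, 3)
x_back = np.transpose(x_nhwc, (0, 3, 1, 2))  # restore channels-first outputs
assert x_back.shape == x_nchw.shape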
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
        # For consistency across the different places where DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
_snake_case = [[1, 2, 4], [1, 2, 3, 4]]
_snake_case = DisjunctiveConstraint(__lowerCamelCase )
self.assertTrue(isinstance(dc.token_ids , __lowerCamelCase ) )
with self.assertRaises(__lowerCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__lowerCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
_snake_case = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__lowerCamelCase ):
DisjunctiveConstraint(__lowerCamelCase ) # fails here
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = [[1, 2, 3], [1, 2, 4]]
_snake_case = DisjunctiveConstraint(__lowerCamelCase )
_snake_case , _snake_case , _snake_case = dc.update(1 )
_snake_case = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_snake_case , _snake_case , _snake_case = dc.update(2 )
_snake_case = stepped is True and completed is False and reset is False
self.assertTrue(__lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_snake_case , _snake_case , _snake_case = dc.update(3 )
_snake_case = stepped is True and completed is True and reset is False
self.assertTrue(__lowerCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_snake_case = DisjunctiveConstraint(__lowerCamelCase )
_snake_case , _snake_case , _snake_case = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_snake_case , _snake_case , _snake_case = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_snake_case , _snake_case , _snake_case = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_snake_case , _snake_case , _snake_case = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_snake_case , _snake_case , _snake_case = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_snake_case , _snake_case , _snake_case = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_snake_case , _snake_case , _snake_case = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 103 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """perceiver"""
def __init__( self , A_=256 , A_=1280 , A_=768 , A_=1 , A_=26 , A_=8 , A_=8 , A_=None , A_=None , A_="kv" , A_=1 , A_=1 , A_="gelu" , A_=0.1 , A_=0.02 , A_=1e-12 , A_=True , A_=262 , A_=2048 , A_=56 , A_=[368, 496] , A_=16 , A_=1920 , A_=16 , A_=[1, 16, 224, 224] , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = num_latents
UpperCamelCase = d_latents
UpperCamelCase = d_model
UpperCamelCase = num_blocks
UpperCamelCase = num_self_attends_per_block
UpperCamelCase = num_self_attention_heads
UpperCamelCase = num_cross_attention_heads
UpperCamelCase = qk_channels
UpperCamelCase = v_channels
UpperCamelCase = cross_attention_shape_for_attention
UpperCamelCase = self_attention_widening_factor
UpperCamelCase = cross_attention_widening_factor
UpperCamelCase = hidden_act
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_query_residual
# masked language modeling attributes
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
# image classification attributes
UpperCamelCase = image_size
# flow attributes
UpperCamelCase = train_size
# multimodal autoencoding attributes
UpperCamelCase = num_frames
UpperCamelCase = audio_samples_per_frame
UpperCamelCase = samples_per_patch
UpperCamelCase = output_shape
class SCREAMING_SNAKE_CASE__ ( snake_case_):
@property
def UpperCAmelCase_ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase_ ( self )-> float:
'''simple docstring'''
return 1e-4
def UpperCAmelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = -1 , A_ = False , A_ = None , A_ = 3 , A_ = 40 , A_ = 40 , )-> Mapping[str, Any]:
'''simple docstring'''
if isinstance(A_ , A_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase = preprocessor.num_special_tokens_to_add(A_ )
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
            # Generate dummy inputs according to the computed batch and sequence sizes
UpperCamelCase = [' '.join(['a'] ) * seq_length] * batch_size
UpperCamelCase = dict(preprocessor(A_ , return_tensors=A_ ) )
UpperCamelCase = inputs.pop('input_ids' )
return inputs
elif isinstance(A_ , A_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(A_ , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCamelCase = self._generate_dummy_images(A_ , A_ , A_ , A_ )
UpperCamelCase = dict(preprocessor(images=A_ , return_tensors=A_ ) )
UpperCamelCase = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 3 | 0 |
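# The first DisjunctiveConstraint tests above hinge on rejecting candidate
# sequences where one is a strict prefix of another, since fulfilling the
# shorter one midway through the longer one would be ambiguous. A hedged
# sketch of that rule (my illustration of the check the test exercises, not
# the library's trie-based implementation):
def has_prefix_conflict(token_id_lists: list[list[int]]) -> bool:
    # True when one candidate is a strict prefix of another.
    for a in token_id_lists:
        for b in token_id_lists:
            if a is not b and len(a) < len(b) and b[: len(a)] == a:
                return True
    return False

assert has_prefix_conflict([[1, 2], [1, 2, 3, 4]])      # the rejected case
assert not has_prefix_conflict([[1, 2, 3], [1, 2, 4]])  # a valid disjunction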
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 104 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """mctct"""
def __init__( self , A_=8065 , A_=1536 , A_=36 , A_=6144 , A_=4 , A_=384 , A_=920 , A_=1e-5 , A_=0.3 , A_="relu" , A_=0.02 , A_=0.3 , A_=0.3 , A_=1 , A_=0 , A_=2 , A_=1 , A_=0.3 , A_=1 , A_=(7,) , A_=(3,) , A_=80 , A_=1 , A_=None , A_="sum" , A_=False , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = num_attention_heads
UpperCamelCase = attention_head_dim
UpperCamelCase = max_position_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = layerdrop
UpperCamelCase = hidden_act
UpperCamelCase = initializer_range
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = conv_glu_dim
UpperCamelCase = conv_dropout
UpperCamelCase = num_conv_layers
UpperCamelCase = input_feat_per_channel
UpperCamelCase = input_channels
UpperCamelCase = conv_channels
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# prevents config testing fail with exporting to json
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 3 | 0 |
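# Both __init__ modules above follow the same pattern: probe for an optional
# backend and populate _import_structure only when it is present. A generic
# sketch of such a probe using importlib.util.find_spec (this stands in for
# is_torch_available() and is not the transformers implementation):
import importlib.util

def backend_available(name: str) -> bool:
    # Check whether a package is installed without importing it.
    return importlib.util.find_spec(name) is not None

_import_structure = {"configuration_mega": ["MegaConfig"]}
if backend_available("torch"):
    _import_structure["modeling_mega"] = ["MegaModel", "MegaPreTrainedModel"]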
import os
import sys
import unittest
UpperCamelCase__ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCamelCase__ : Tuple = os.path.join(git_repo_path, '''src''', '''transformers''')
UpperCamelCase__ : List[Any] = '''
{0} = None
'''
UpperCamelCase__ : str = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
UpperCamelCase__ : List[Any] = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(snake_case__ ,'tokenizers' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(snake_case__ ,'tensorflow_text' )
SCREAMING_SNAKE_CASE_ : int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tokenizers' )
SCREAMING_SNAKE_CASE_ : str = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tensorflow_text' )
SCREAMING_SNAKE_CASE_ : Tuple = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tokenizers_and_vision' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' ,snake_case__ )
self.assertIn('tensorflow_text' ,snake_case__ )
self.assertIn('sentencepiece_and_tokenizers' ,snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' ,objects['torch'] )
self.assertIn('TFBertModel' ,objects['tf'] )
self.assertIn('FlaxBertModel' ,objects['flax'] )
self.assertIn('BertModel' ,objects['torch'] )
self.assertIn('TFBertTokenizer' ,objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' ,objects['sentencepiece_and_tokenizers'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = create_dummy_object('CONSTANT' ,'\'torch\'' )
self.assertEqual(snake_case__ ,'\nCONSTANT = None\n' )
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_object('function' ,'\'torch\'' )
self.assertEqual(
snake_case__ ,'\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
SCREAMING_SNAKE_CASE_ : List[str] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_object('FakeClass' ,'\'torch\'' )
self.assertEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] ,snake_case__ )
| 105 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCAmelCase : Tuple = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCAmelCase : Optional[int] = TaTokenizerFast
lowerCAmelCase : Any = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 3 | 0 |
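# The check_dummies test above asserts on strings produced by filling the
# dummy-class template with a class name and a backend spec. Filling the
# same template by hand shows the shape of a generated dummy (plain
# str.format; no transformers imports are needed to render the text):
DUMMY_CLASS_TEMPLATE = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

print(DUMMY_CLASS_TEMPLATE.format("FakeClass", '["torch"]'))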
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :str =logging.get_logger(__name__)
__snake_case :Tuple ={
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : Tuple = 'speech_to_text_2'
A_ : Union[str, Any] = ['past_key_values']
A_ : Dict = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : int , __UpperCamelCase : Union[str, Any]=10_000 , __UpperCamelCase : List[Any]=6 , __UpperCamelCase : Any=2_048 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Any=True , __UpperCamelCase : List[str]="relu" , __UpperCamelCase : Optional[Any]=256 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : int=0.0 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Dict=1_024 , **__UpperCamelCase : Any , ) -> List[Any]:
A = vocab_size
A = d_model
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = decoder_layerdrop
A = use_cache
A = decoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = max_target_positions
super().__init__(
            pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
| 106 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , )-> Dict:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase):
lowerCAmelCase_ = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@is_flaky()
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 0 |
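# The Donut image-processor test above checks that an int size becomes a
# square {"height", "width"} dict and that a legacy (width, height) tuple is
# flipped into (height, width). A hedged sketch of that normalization (my
# helper name, not the library's internal API):
def normalize_size(size) -> dict:
    # Accept an int, a legacy (width, height) pair, or a ready-made dict.
    if isinstance(size, int):
        return {"height": size, "width": size}
    if isinstance(size, (tuple, list)):
        width, height = size  # older configs stored (width, height)
        return {"height": height, "width": width}
    return dict(size)

assert normalize_size(42) == {"height": 42, "width": 42}
assert normalize_size((42, 84)) == {"height": 84, "width": 42}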
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCAmelCase : Any = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_UpperCAmelCase : str = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    """Google BLEU (GLEU) metric, computed with `nltk.translate.gleu_score`."""

    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
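# Minimal usage sketch (added for illustration; it mirrors the docstring
# examples above and introduces no new API):
#
#     google_bleu = datasets.load_metric("google_bleu")
#     result = google_bleu.compute(
#         predictions=[["the", "cat", "sat"]],
#         references=[[["the", "cat", "sat"]]],
#     )
#     result["google_bleu"]  # 1.0 for an exact match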
| 107 |
"""Return all permutations of a list, recursively and via backtracking."""
from __future__ import annotations


def permute_recursive(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations.

    >>> permute_recursive([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    """
    result: list[list[int]] = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute_recursive(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute_backtrack(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations of the given list.

    >>> permute_backtrack([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute_backtrack function
    res = permute_backtrack([1, 2, 3])
    print(res)
    doctest.testmod()
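    # Sanity check added for illustration (not in the original file): both
    # implementations generate the same set of permutations.
    assert sorted(permute_recursive([1, 2, 3])) == sorted(res)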
| 3 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    r"""Constructs a BLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. `1 / 255` to map pixels into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess an image or batch of images into model-ready `pixel_values`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # parentheses added to fix the original operator-precedence bug
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
| 108 |
"""Draw the Mandelbrot set as a PIL image, optionally color-coded by divergence speed."""
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step / (max_step - 1)) after which the
    complex number c = x + y*i diverges under z -> z*z + c.

    >>> get_distance(0, 0, 50)
    1.0
    >>> get_distance(1, 1, 50)
    0.0
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; outside, the hue encodes the relative distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the chosen section of the Mandelbrot set into a new image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
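    # More variations (added for illustration; these reuse only the parameters
    # defined above):
    # get_image(figure_width=0.8, figure_center_y=-0.4).show()  # zoomed-in view
    # get_image(max_step=100).save("mandelbrot_hi_detail.png")  # finer gradients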
| 3 | 0 |
"""GIT model configuration."""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GitConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize this instance to a Python dict, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
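# Minimal usage sketch (added for illustration; not part of the original
# module):
#
#     config = GitConfig(num_hidden_layers=2)
#     config.vision_config.image_size   # nested GitVisionConfig, 224 by default
#     config.to_dict()["model_type"]    # "git"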
| 109 |
"""Falcon model lazy import structure."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
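# Comment added for clarity: `_import_structure` maps each submodule to the
# public names it defines; the `_LazyModule` at the bottom of this file uses it
# to defer the heavy imports until an attribute is actually accessed.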
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
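    # Comment added for clarity: DiffEdit needs two schedulers -- the
    # `inverse_scheduler` runs DDIM inversion to turn the source image into
    # latent noise, and the regular `scheduler` then denoises those latents
    # under the target prompt, restricted to the generated mask.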
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
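# The three-step DiffEdit API exercised above, in sketch form (added for
# illustration only; argument names follow the calls in the tests):
#
#     mask = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
#     latents = pipe.invert(prompt=src, image=img).latents
#     edited = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]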
| 110 |
"""Breadth-first search shortest path implementations."""
from __future__ import annotations

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start: str, goal: str) -> list[str]:
    """
    Find the shortest path between the `start` and `goal` nodes.

    >>> bfs_shortest_path(demo_graph, "G", "D")
    ['G', 'C', 'A', 'B', 'D']
    """
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    """
    Find the shortest path distance between the `start` and `target` nodes.

    >>> bfs_shortest_path_distance(demo_graph, "G", "D")
    4
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
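    # Sanity check added for illustration: the path length in edges should
    # equal the distance computed by the second helper.
    assert len(bfs_shortest_path(demo_graph, "G", "D")) - 1 == bfs_shortest_path_distance(demo_graph, "G", "D")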
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 376 |
"""Constant stretch (histogram stretching) for greyscale images."""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # kept from the original algorithm; `rem` starts at 0, so this
                # branch is effectively dead code
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # dirname (not basename) so the input path resolves relative to this script
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
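# Usage note (added): `stretch` reads the input image in greyscale and writes
# the stretched result to `output_data/output.jpg`; both the `image_data` and
# `output_data` directories must exist before running the script.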
| 3 | 0 |
"""Return the complementary DNA strand for a given strand."""
import re


def dna_complement(dna: str) -> str:
    """
    >>> dna_complement("GCTA")
    'CGAT'
    >>> dna_complement("GCXA")
    Traceback (most recent call last):
        ...
    ValueError: Invalid Strand
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
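    # Extra example added for illustration: the reverse complement is the
    # complement read backwards.
    print(dna_complement("ATCG")[::-1])  # reverse complement of "ATCG" -> "CGAT"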
| 286 |
"""UniSpeechSat model configuration."""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
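    # Quick arithmetic sketch (added for illustration): with the default conv
    # strides (5, 2, 2, 2, 2, 2, 2) this property is 5 * 2**6 = 320, i.e. one
    # encoder frame per 320 waveform samples (20 ms of 16 kHz audio).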
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]
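# Comment added for clarity: the `TYPE_CHECKING` branch below imports the real
# symbols so static type checkers and IDEs can resolve them, while at runtime
# the `_LazyModule` at the bottom of the file keeps the imports deferred.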
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 297 |
"""Testing suite for the PyTorch BEiT model."""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
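# Comment added for clarity: the tester above builds a deliberately tiny BeiT
# configuration so the shape checks in the test class below run quickly on CPU.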
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 21841) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
UpperCamelCase = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=A_ , )
else:
UpperCamelCase = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits.detach().cpu()
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(500, 300)] )
UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , A_ )
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ )
UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , A_ )
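# Illustrative usage sketch (not part of the test class above): end-to-end BEiT
# semantic segmentation on one image, recovering a per-pixel class map at the
# original resolution via post_process_semantic_segmentation. Checkpoint and
# fixture path mirror the integration tests; substitute your own inputs.
import torch
from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor

image_processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
inputs = image_processor(images=image, return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
# target_sizes expects (height, width); PIL's Image.size is (width, height), hence the reversal
segmentation = image_processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)  # (height, width) tensor of ADE20K class ids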
| 3 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
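# Minimal sketch of what the _LazyModule indirection above buys consumers: the
# heavy torch/TF submodules are imported only when one of their exported names
# is first accessed, so importing the package itself stays cheap. Hypothetical
# usage, assuming torch is installed:
from transformers import EfficientFormerConfig  # resolved lazily on first access

config = EfficientFormerConfig()
print(config.model_type)  # "efficientformer"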
| 437 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """generated"""
def __init__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
super().__init__(*A_ , **A_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , **A_ , )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = {}
if truncation is not None:
UpperCamelCase = truncation
UpperCamelCase = generate_kwargs
UpperCamelCase = {}
if return_tensors is not None and return_type is None:
UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCamelCase = self.tokenizer.encode(A_ , add_special_tokens=A_ )
if len(A_ ) > 1:
warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported in transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
return True
def UpperCAmelCase_ ( self , *A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , A_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
UpperCamelCase = ([prefix + arg for arg in args[0]],)
UpperCamelCase = True
elif isinstance(args[0] , A_ ):
UpperCamelCase = (prefix + args[0],)
UpperCamelCase = False
else:
raise ValueError(
                F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`''' )
UpperCamelCase = self.tokenizer(*A_ , padding=A_ , truncation=A_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = super().__call__(*A_ , **A_ )
if (
isinstance(args[0] , A_ )
and all(isinstance(A_ , A_ ) for el in args[0] )
and all(len(A_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase_ ( self , A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self._parse_and_tokenize(A_ , truncation=A_ , **A_ )
return inputs
def UpperCAmelCase_ ( self , A_ , **A_ )-> int:
'''simple docstring'''
if self.framework == "pt":
UpperCamelCase , UpperCamelCase = model_inputs['input_ids'].shape
elif self.framework == "tf":
UpperCamelCase , UpperCamelCase = tf.shape(model_inputs['input_ids'] ).numpy()
UpperCamelCase = generate_kwargs.get('min_length' , self.model.config.min_length )
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(A_ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
UpperCamelCase = self.model.generate(**A_ , **A_ )
UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
UpperCamelCase = output_ids.reshape(A_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCamelCase = tf.reshape(A_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase_ ( self , A_ , A_=ReturnType.TEXT , A_=False )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCamelCase = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
UpperCamelCase = {
F'''{self.return_name}_text''': self.tokenizer.decode(
A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
}
records.append(A_ )
return records
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """summary"""
def __call__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> bool:
'''simple docstring'''
if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """translation"""
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCAmelCase_ ( self , *A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , A_=None , A_=None )-> Dict:
'''simple docstring'''
if getattr(self.tokenizer , '_build_translation_inputs' , A_ ):
return self.tokenizer._build_translation_inputs(
*A_ , return_tensors=self.framework , truncation=A_ , src_lang=A_ , tgt_lang=A_ )
else:
return super()._parse_and_tokenize(*A_ , truncation=A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , **A_ )-> str:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = super()._sanitize_parameters(**A_ )
if src_lang is not None:
UpperCamelCase = src_lang
if tgt_lang is not None:
UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCamelCase = kwargs.get('task' , self.task )
UpperCamelCase = task.split('_' )
if task and len(A_ ) == 4:
# translation, XX, to YY
UpperCamelCase = items[1]
UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
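# Illustrative usage sketch for the three pipelines defined above, driven
# through the high-level pipeline() factory. 't5-small' is an example seq2seq
# checkpoint that supports all three tasks; swap in any compatible model.
from transformers import pipeline

generator = pipeline('text2text-generation', model='t5-small')
print(generator('translate English to German: How old are you?'))
summarizer = pipeline('summarization', model='t5-small')
print(summarizer('Very long article text ...', min_length=5, max_length=20))
translator = pipeline('translation_en_to_fr', model='t5-small')
print(translator('How old are you?'))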
| 3 | 0 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class a ( snake_case_ ):
def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Dict ):
"""simple docstring"""
super().__init__(*A_ , **A_ )
requires_backends(self , "vision" )
self.check_model_type(A_ )
def __call__( self : Optional[Any] , snake_case__ : str , **snake_case__ : Any ):
"""simple docstring"""
return super().__call__(A_ , **A_ )
def UpperCAmelCase__ ( self : Union[str, Any] , **snake_case__ : int ):
"""simple docstring"""
return {}, {}, {}
def UpperCAmelCase__ ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
__lowerCAmelCase = load_image(A_ )
__lowerCAmelCase = image.size
__lowerCAmelCase = self.image_processor(images=A_ , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase__ ( self : int , snake_case__ : List[Any] ):
"""simple docstring"""
__lowerCAmelCase = self.model(**A_ )
return model_outputs
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
__lowerCAmelCase = model_outputs.predicted_depth
__lowerCAmelCase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=A_ )
__lowerCAmelCase = prediction.squeeze().cpu().numpy()
__lowerCAmelCase = (output * 255 / np.max(A_ )).astype("uint8" )
__lowerCAmelCase = Image.fromarray(A_ )
__lowerCAmelCase = {}
__lowerCAmelCase = predicted_depth
__lowerCAmelCase = depth
return output_dict
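# Illustrative usage sketch for the depth-estimation pipeline above. The call
# returns the raw predicted_depth tensor plus a uint8 PIL rendering, matching
# the postprocess step. 'Intel/dpt-large' is one known depth checkpoint.
from transformers import pipeline

depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
result = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
print(result['predicted_depth'].shape)
result['depth'].save('depth.png')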
| 611 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 0
lowerCAmelCase_ = False
lowerCAmelCase_ = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=A_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCamelCase = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , A_ )
@require_multi_gpu
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : Tuple = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : List[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : int = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : Dict = ''
lowerCAmelCase : Dict = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
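# Sketch of the KwargsHandler contract the tests above exercise: any dataclass
# subclass gains to_kwargs(), which emits only the fields that differ from
# their declared defaults.
from dataclasses import dataclass
from accelerate.utils import KwargsHandler

@dataclass
class ExampleKwargs(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0

print(ExampleKwargs().to_kwargs())             # {}
print(ExampleKwargs(a=2, b=True).to_kwargs())  # {'a': 2, 'b': True}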
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # extended Euclidean algorithm: returns (x, y) with a * x + b * y == gcd(a, b)
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    # solves x ≡ r_1 (mod n_1) and x ≡ r_2 (mod n_2) for coprime n_1, n_2
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    # multiplicative inverse of a modulo n, for a coprime to n
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    # same solution as above, computed via modular inverses instead
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
if __name__ == "__main__":
    from doctest import testmod
    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 270 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None )-> Tuple:
'''simple docstring'''
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , )-> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def UpperCAmelCase_ ( self , A_ , A_ )-> torch.FloatTensor:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
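# Illustrative usage sketch for the pipeline above, assuming a diffusers
# version that still ships VQDiffusionPipeline. 'microsoft/vq-diffusion-ithq'
# is the reference checkpoint; truncation_rate feeds the truncate() step.
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
image = pipe('teddy bear playing in the pool', truncation_rate=1.0, num_inference_steps=100).images[0]
image.save('teddy_bear.png')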
| 3 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase = 13 , __lowercase = 64 , __lowercase = 2 , __lowercase = 3 , __lowercase = 3 , __lowercase = True , __lowercase = True , __lowercase = 128 , __lowercase=[16, 32, 64, 128] , __lowercase = 7 , __lowercase = 4 , __lowercase = 37 , __lowercase = "gelu" , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 10 , __lowercase = 0.02 , __lowercase = 2 , __lowercase = 1 , __lowercase = 128 , __lowercase = [2, 2, 2, 2] , __lowercase = 2 , __lowercase = 2 , ) -> int:
__UpperCamelCase :int = parent
__UpperCamelCase :Any = batch_size
__UpperCamelCase :Union[str, Any] = image_size
__UpperCamelCase :str = patch_size
__UpperCamelCase :List[Any] = num_channels
__UpperCamelCase :List[Any] = is_training
__UpperCamelCase :Optional[Any] = use_labels
__UpperCamelCase :Any = hidden_size
__UpperCamelCase :Tuple = num_hidden_layers
__UpperCamelCase :Dict = num_attention_heads
__UpperCamelCase :List[Any] = intermediate_size
__UpperCamelCase :int = hidden_act
__UpperCamelCase :Any = hidden_dropout_prob
__UpperCamelCase :Optional[Any] = attention_probs_dropout_prob
__UpperCamelCase :Dict = type_sequence_label_size
__UpperCamelCase :Dict = initializer_range
__UpperCamelCase :List[str] = encoder_stride
__UpperCamelCase :Any = num_attention_outputs
__UpperCamelCase :str = embed_dim
__UpperCamelCase :str = embed_dim + 1
__UpperCamelCase :Dict = resolution
__UpperCamelCase :Optional[Any] = depths
__UpperCamelCase :List[str] = hidden_sizes
__UpperCamelCase :str = dim
__UpperCamelCase :str = mlp_expansion_ratio
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCamelCase :List[Any] = None
if self.use_labels:
__UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self) -> Optional[int]:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :Optional[Any] = TFEfficientFormerModel(config=A_)
__UpperCamelCase :Any = model(A_ , training=A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> Optional[Any]:
__UpperCamelCase :Dict = self.type_sequence_label_size
__UpperCamelCase :Optional[Any] = TFEfficientFormerForImageClassification(A_)
__UpperCamelCase :Union[str, Any] = model(A_ , labels=A_ , training=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__UpperCamelCase :Optional[Any] = 1
__UpperCamelCase :Any = TFEfficientFormerForImageClassification(A_)
__UpperCamelCase :List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__UpperCamelCase :Any = model(A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :List[Any] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = config_and_inputs
__UpperCamelCase :Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
a__ : Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
a__ : Optional[int] = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
a__ : Any = False
a__ : Tuple = False
a__ : List[Any] = False
a__ : List[str] = False
a__ : List[str] = False
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Optional[Any] = TFEfficientFormerModelTester(self)
__UpperCamelCase :Optional[int] = ConfigTester(
self , config_class=A_ , has_text_modality=A_ , hidden_size=37)
def UpperCamelCase__ ( self) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def UpperCamelCase__ ( self) -> int:
pass
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase :List[Any] = model_class(A_)
__UpperCamelCase :Tuple = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase :List[str] = [*signature.parameters.keys()]
__UpperCamelCase :Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_)
def UpperCamelCase__ ( self) -> Optional[int]:
def check_hidden_states_output(__lowercase , __lowercase , __lowercase):
__UpperCamelCase :Union[str, Any] = model_class(A_)
__UpperCamelCase :Any = model(**self._prepare_for_class(A_ , A_) , training=A_)
__UpperCamelCase :Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCamelCase :List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(A_) , A_)
if hasattr(self.model_tester , '''encoder_seq_length'''):
__UpperCamelCase :Union[str, Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
__UpperCamelCase :List[Any] = seq_length * self.model_tester.chunk_length
else:
__UpperCamelCase :List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__UpperCamelCase :Dict = outputs.decoder_hidden_states
                self.assertIsInstance(A_ , (list, tuple))
self.assertEqual(len(A_) , A_)
__UpperCamelCase :str = getattr(self.model_tester , '''seq_length''' , A_)
__UpperCamelCase :int = getattr(self.model_tester , '''decoder_seq_length''' , A_)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
__UpperCamelCase , __UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase :Optional[Any] = True
check_hidden_states_output(A_ , A_ , A_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase :List[str] = True
check_hidden_states_output(A_ , A_ , A_)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase=False) -> int:
__UpperCamelCase :List[str] = super()._prepare_for_class(A_ , A_ , return_labels=A_)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_)
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_)
@slow
def UpperCamelCase__ ( self) -> Any:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :Optional[int] = TFEfficientFormerModel.from_pretrained(A_)
self.assertIsNotNone(A_)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase , __UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :int = True
__UpperCamelCase :List[Any] = getattr(self.model_tester , '''seq_length''' , A_)
__UpperCamelCase :Optional[int] = getattr(self.model_tester , '''encoder_seq_length''' , A_)
__UpperCamelCase :Optional[int] = getattr(self.model_tester , '''key_length''' , A_)
__UpperCamelCase :Optional[int] = getattr(self.model_tester , '''chunk_length''' , A_)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
__UpperCamelCase :Dict = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__UpperCamelCase :List[Any] = True
__UpperCamelCase :List[str] = False
__UpperCamelCase :int = True
__UpperCamelCase :List[str] = model_class(A_)
__UpperCamelCase :Optional[Any] = model(**self._prepare_for_class(A_ , A_) , training=A_)
__UpperCamelCase :str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCamelCase :str = True
__UpperCamelCase :str = model_class(A_)
__UpperCamelCase :Union[str, Any] = model(**self._prepare_for_class(A_ , A_) , training=A_)
__UpperCamelCase :Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase , __UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__UpperCamelCase :List[str] = model_class(A_)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__UpperCamelCase :List[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=A_)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__UpperCamelCase :Optional[int] = model(A_)
self.assertTrue(outputs_dict is not None)
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self) -> Optional[int]:
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Optional[Any] = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
__UpperCamelCase :Optional[Any] = self.default_image_processor
__UpperCamelCase :Union[str, Any] = prepare_img()
__UpperCamelCase :Any = image_processor(images=A_ , return_tensors='''tf''')
# forward pass
__UpperCamelCase :List[str] = model(**A_ , training=A_)
# verify the logits
__UpperCamelCase :str = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , A_)
__UpperCamelCase :List[Any] = tf.constant([-0.05_55, 0.48_25, -0.08_52])
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1E-4))
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Optional[int] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
__UpperCamelCase :Optional[Any] = self.default_image_processor
__UpperCamelCase :List[Any] = prepare_img()
__UpperCamelCase :Optional[Any] = image_processor(images=A_ , return_tensors='''tf''')
# forward pass
__UpperCamelCase :int = model(**A_ , training=A_)
# verify the logits
__UpperCamelCase :Tuple = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , A_)
__UpperCamelCase :Any = tf.constant([-0.13_12, 0.43_53, -1.04_99])
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1E-4))
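# Illustrative sketch (outside the test class): mapping the logits verified
# above to a human-readable ImageNet label via the config's id2label table.
import tensorflow as tf
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
inputs = processor(images=image, return_tensors='tf')
logits = model(**inputs, training=False).logits
predicted_id = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_id])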
| 167 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Union[str, Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
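# Illustrative usage sketch for the exports above: image captioning with
# GitForCausalLM. 'microsoft/git-base' is a known checkpoint; AutoProcessor
# bundles the image processor and tokenizer needed for generation.
from PIL import Image
from transformers import AutoProcessor, GitForCausalLM

processor = AutoProcessor.from_pretrained('microsoft/git-base')
model = GitForCausalLM.from_pretrained('microsoft/git-base')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
pixel_values = processor(images=image, return_tensors='pt').pixel_values
generated_ids = model.generate(pixel_values=pixel_values, max_length=20)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])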
| 3 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
A__ : Any = logging.get_logger(__name__)
A__ : Union[str, Any] = 'Hello world! cécé herlolip'
def UpperCamelCase( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : bool ):
lowerCAmelCase_ : List[str] = FairseqRobertaModel.from_pretrained(__UpperCamelCase )
roberta.eval() # disable dropout
lowerCAmelCase_ : Any = roberta.model.encoder.sentence_encoder
lowerCAmelCase_ : Union[str, Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1e-5 ,)
if classification_head:
lowerCAmelCase_ : Dict = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' ,__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = XLMRobertaXLForSequenceClassification(__UpperCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(__UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCAmelCase_ : str = roberta_sent_encoder.embed_tokens.weight
lowerCAmelCase_ : str = roberta_sent_encoder.embed_positions.weight
lowerCAmelCase_ : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowerCAmelCase_ : List[str] = roberta_sent_encoder.layer_norm.weight
lowerCAmelCase_ : Union[str, Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCAmelCase_ : str = model.roberta.encoder.layer[i]
lowerCAmelCase_ : Dict = roberta_sent_encoder.layers[i]
lowerCAmelCase_ : Dict = layer.attention
lowerCAmelCase_ : Optional[int] = roberta_layer.self_attn_layer_norm.weight
lowerCAmelCase_ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCAmelCase_ : Optional[Any] = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCAmelCase_ : Any = roberta_layer.self_attn.q_proj.weight
lowerCAmelCase_ : List[Any] = roberta_layer.self_attn.q_proj.bias
lowerCAmelCase_ : str = roberta_layer.self_attn.k_proj.weight
lowerCAmelCase_ : Tuple = roberta_layer.self_attn.k_proj.bias
lowerCAmelCase_ : int = roberta_layer.self_attn.v_proj.weight
lowerCAmelCase_ : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCAmelCase_ : Optional[Any] = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCAmelCase_ : Optional[int] = roberta_layer.self_attn.out_proj.weight
lowerCAmelCase_ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowerCAmelCase_ : str = roberta_layer.final_layer_norm.weight
lowerCAmelCase_ : Tuple = roberta_layer.final_layer_norm.bias
# intermediate
lowerCAmelCase_ : List[str] = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCAmelCase_ : int = roberta_layer.fca.weight
lowerCAmelCase_ : int = roberta_layer.fca.bias
# output
lowerCAmelCase_ : str = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
lowerCAmelCase_ : str = roberta_layer.fca.weight
lowerCAmelCase_ : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
lowerCAmelCase_ : str = roberta.model.classification_heads['''mnli'''].dense.weight
lowerCAmelCase_ : List[Any] = roberta.model.classification_heads['''mnli'''].dense.bias
lowerCAmelCase_ : str = roberta.model.classification_heads['''mnli'''].out_proj.weight
lowerCAmelCase_ : Optional[Any] = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCAmelCase_ : Optional[int] = roberta.model.encoder.lm_head.dense.weight
lowerCAmelCase_ : List[Any] = roberta.model.encoder.lm_head.dense.bias
lowerCAmelCase_ : List[Any] = roberta.model.encoder.lm_head.layer_norm.weight
lowerCAmelCase_ : List[Any] = roberta.model.encoder.lm_head.layer_norm.bias
lowerCAmelCase_ : str = roberta.model.encoder.lm_head.weight
lowerCAmelCase_ : Union[str, Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCAmelCase_ : Tuple = roberta.encode(__UpperCamelCase ).unsqueeze(0 ) # batch of size 1
lowerCAmelCase_ : Dict = model(__UpperCamelCase )[0]
if classification_head:
lowerCAmelCase_ : List[str] = roberta.model.classification_heads['''mnli'''](roberta.extract_features(__UpperCamelCase ) )
else:
lowerCAmelCase_ : List[Any] = roberta.model(__UpperCamelCase )[0]
print(our_output.shape ,their_output.shape )
lowerCAmelCase_ : Tuple = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCAmelCase_ : List[Any] = torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 )
print('''Do both models output the same tensors?''' ,'''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(__UpperCamelCase ).mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
A__ : List[str] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
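# Example invocation of this conversion script (paths are hypothetical; the
# script filename is whatever this file is saved as):
#
#   python <this_script>.py \
#       --roberta_checkpoint_path /path/to/xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted \
#       --classification_head   # only if the fairseq checkpoint carries the MNLI head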
| 171 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ = None , A_ = None , A_=None , A_=None )-> Optional[Any]:
'''simple docstring'''
if not conversation_id:
UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
UpperCamelCase = []
if generated_responses is None:
UpperCamelCase = []
UpperCamelCase = conversation_id
UpperCamelCase = past_user_inputs
UpperCamelCase = generated_responses
UpperCamelCase = text
def __eq__( self , A_ )-> List[Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self , A_ , A_ = False )-> int:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
UpperCamelCase = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
UpperCamelCase = text
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
self.generated_responses.append(A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self )-> Any:
'''simple docstring'''
UpperCamelCase = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
UpperCamelCase = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
snake_case_ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
UpperCamelCase = self.tokenizer.eos_token
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
if min_length_for_response is not None:
UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
UpperCamelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self , A_ , A_=32 )-> Dict[str, Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
if conversation.new_user_input is None:
raise ValueError(
                F'''Conversation with UUID {conversation.uuid} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCamelCase = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
UpperCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=10 , **A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
UpperCamelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
UpperCamelCase = max_length - minimum_tokens
UpperCamelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
UpperCamelCase = model_inputs['attention_mask'][:, -trim:]
UpperCamelCase = model_inputs.pop('conversation' )
UpperCamelCase = max_length
UpperCamelCase = self.model.generate(**A_ , **A_ )
if self.model.config.is_encoder_decoder:
UpperCamelCase = 1
else:
UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=True )-> Tuple:
'''simple docstring'''
UpperCamelCase = model_outputs['output_ids']
UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
UpperCamelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = self.tokenizer.eos_token_id
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
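# Illustrative usage sketch for the pipeline above: a single exchange with a
# DialoGPT checkpoint. Conversation tracks the turn history and the pipeline
# fills in the model's reply via append_response().
from transformers import Conversation, pipeline

chatbot = pipeline('conversational', model='microsoft/DialoGPT-small')
conversation = Conversation('What is the best way to learn Python?')
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])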
| 3 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict) -> None:
    ignore_keys = [
        'decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path) -> XGLMForCausalLM:
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    args = Namespace(**checkpoint['cfg']['model'] )
    state_dict = checkpoint['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    state_dict = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    # strict=False because the output projection is rebuilt from the embeddings below
    missing_keys = model.load_state_dict(state_dict , strict=False )
    print(missing_keys )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
lowerCAmelCase__: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowerCAmelCase__: str = parser.parse_args()
lowerCAmelCase__: Dict = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
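# Example invocation (a sketch; the script name and both paths are hypothetical
# placeholders, not files shipped with any repository):
#
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted
#
# The first positional argument is `fairseq_path`, the second is
# `pytorch_dump_folder_path`, matching the argparse definition above.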
| 345 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
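# Usage sketch: pass the query as command-line arguments, e.g.
#
#   python google_search.py hugging face transformers
#
# which opens the top five results in the default browser. Note that the
# '.eZt8xd' CSS selector is tied to Google's current result markup and may
# break without warning when the page layout changes.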
| 3 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__( self : List[str] , __A : List[Any] , __A : List[Any]=99 , __A : Any=13 , __A : int=16 , __A : Optional[int]=7 , __A : Optional[Any]=True , __A : int=True , __A : Any=True , __A : List[str]=False , __A : Any=True , __A : Any=2 , __A : Dict=32 , __A : Union[str, Any]=4 , __A : str=4 , __A : Tuple=30 , __A : Tuple=0 , __A : Any=1 , __A : Union[str, Any]=2 , __A : Optional[Any]=None , ):
__A : Union[str, Any] = parent
__A : Optional[Any] = batch_size
__A : str = decoder_seq_length
# For common tests
__A : Union[str, Any] = self.decoder_seq_length
__A : int = is_training
__A : str = use_attention_mask
__A : str = use_labels
__A : Optional[int] = vocab_size
__A : List[str] = d_model
__A : List[Any] = d_model
__A : List[str] = decoder_layers
__A : List[str] = decoder_layers
__A : str = decoder_ffn_dim
__A : Dict = decoder_attention_heads
__A : str = decoder_attention_heads
__A : Union[str, Any] = eos_token_id
__A : Optional[int] = bos_token_id
__A : List[str] = pad_token_id
__A : Union[str, Any] = decoder_start_token_id
__A : List[str] = use_cache
__A : List[str] = max_position_embeddings
__A : Union[str, Any] = None
__A : str = decoder_seq_length
__A : Union[str, Any] = 2
__A : Tuple = 1
def lowerCAmelCase_ ( self : List[str] ):
__A : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__A : Optional[int] = None
if self.use_attention_mask:
__A : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__A : Dict = None
if self.use_labels:
__A : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__A : Dict = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowerCAmelCase_ ( self : Optional[Any] , __A : Union[str, Any] , __A : List[str] , __A : int , __A : List[str] , ):
__A : str = True
__A : Dict = TrOCRDecoder(config=A_ ).to(A_ ).eval()
__A : str = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__A : Any = model(A_ , use_cache=A_ )
__A : Dict = model(A_ )
__A : Optional[int] = model(A_ , use_cache=A_ )
self.parent.assertTrue(len(A_ ) == len(A_ ) )
self.parent.assertTrue(len(A_ ) == len(A_ ) + 1 )
__A : int = outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
__A : List[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__A : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : str = model(A_ )["""last_hidden_state"""]
__A : Tuple = model(A_ , past_key_values=A_ )["""last_hidden_state"""]
# select random slice
__A : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : List[Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__A : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(A_ , A_ , atol=1e-3 )
def lowerCAmelCase_ ( self : List[Any] ):
__A : str = self.prepare_config_and_inputs()
__A , __A , __A , __A : str = config_and_inputs
__A : List[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    # Attribute names below follow the standard transformers test-suite layout;
    # the obfuscated source only preserved their values.
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
def lowerCAmelCase_ ( self : List[Any] ):
__A : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=A_ )
__A : Optional[int] = ConfigTester(self , config_class=A_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
pass
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[Any] ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*A_ )
def lowerCAmelCase_ ( self : List[Any] ):
return
@unittest.skip("""The model doesn\'t support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase_ ( self : Union[str, Any] ):
pass
| 17 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    # number of integration steps from xa to x_end with step size h
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # classic fourth-order Runge-Kutta slopes
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
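# Worked example (a sketch): integrating dy/dx = y from x = 0 to x = 1 with
# y(0) = 1 should approximate e ≈ 2.71828 at the right endpoint, since the
# classic fourth-order scheme has global error O(h^4):
#
#   ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
#   print(ys[-1])  # ~2.71828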
| 3 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "deit"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1e-4
| 523 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling(TaskTemplate ):
    task: str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
    @property
    def column_mapping( self )-> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
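# Usage sketch (hedged: relies on the task-template API that `datasets` exposed
# at the time this template existed; the dataset id is a hypothetical placeholder):
#
#   from datasets import load_dataset
#
#   ds = load_dataset('some_text_corpus', split='train')
#   lm_ds = ds.prepare_for_task('language-modeling')  # applies column_mapping above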
| 3 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
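# Example invocation (a sketch; the script name and paths are hypothetical
# placeholders):
#
#   python convert_bert_tf_checkpoint.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin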
| 376 |
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element = -1
        for j in range(i + 1 , arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack = []
    result = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
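# Quick sanity check (a sketch): each element is pushed and popped at most once
# in next_greatest_element, so the monotonic-stack version runs in O(n) versus
# O(n^2) for the two nested-loop variants:
#
#   >>> next_greatest_element([2, 7, 3, 5, 4, 6, 8])
#   [7, 8, 5, 6, 6, 8, -1]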
| 3 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser ) -> None:
        '''simple docstring'''
        raise NotImplementedError()
    @abstractmethod
    def run( self ) -> None:
        '''simple docstring'''
        raise NotImplementedError()
| 286 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ))
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
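# Example behaviour (a sketch): only a leading ASCII lowercase letter is mapped,
# everything else is left untouched.
#
#   >>> capitalize('hello world')
#   'Hello world'
#   >>> capitalize('123 hello')
#   '123 hello'
#   >>> capitalize('')
#   ''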
| 3 | 0 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
    def __init__( self , image_processor , feature_extractor ):
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process." )
        images_dict = None
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
    @property
    def model_input_names( self ):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
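# A minimal usage sketch (hedged: assumes TvltImageProcessor/TvltFeatureExtractor
# instances compatible with the attribute names declared above; the inputs are
# hypothetical placeholders):
#
#   import numpy as np
#   processor = TvltProcessor(image_processor, feature_extractor)
#   inputs = processor(
#       images=video_frames,        # e.g. a list of frame arrays
#       audio=np.zeros(44_100),     # one second of silence
#       sampling_rate=44_100,
#   )
#   # `inputs` merges the image and audio dictionaries prepared above.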
| 297 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        UpperCamelCase = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        UpperCamelCase = tf.keras.layers.Conv2D(
            filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
        UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
        UpperCamelCase = ACT2FN[activation] if activation is not None else tf.identity
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.convolution(self.padding(A_ ) )
UpperCamelCase = self.normalization(A_ )
UpperCamelCase = self.activation(A_ )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config.num_channels
UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) )
UpperCamelCase = self.embedder(A_ )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(**A_ )
        UpperCamelCase = tf.keras.layers.Conv2D(
            filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(A_ ) , training=A_ )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
        UpperCamelCase = tf.keras.layers.GlobalAveragePooling2D(keepdims=A_ , name='pooler' )
        UpperCamelCase = [
            tf.keras.layers.Conv2D(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
            tf.keras.layers.Conv2D(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
        ]
]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.pooler(A_ )
for layer_module in self.attention:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
        UpperCamelCase = ACT2FN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
        UpperCamelCase = ACT2FN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(A_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self , A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config
UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' )
UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' )
        UpperCamelCase = tf.keras.layers.GlobalAveragePooling2D(keepdims=A_ , name='pooler' )
@unpack_inputs
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(A_ , training=A_ )
UpperCamelCase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
# Change to NCHW output format have uniformity in the modules
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
REGNET_START_DOCSTRING = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""" , REGNET_START_DOCSTRING , )
class TFRegNetModel(TFRegNetPreTrainedModel ):
def __init__( self , A_ , *A_ , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
def __init__( self , A_ , *A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
# classification head
UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier[0](A_ )
UpperCamelCase = self.classifier[1](A_ )
UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
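# A minimal inference sketch (hedged: relies on the 'facebook/regnet-y-040'
# checkpoint referenced in the docstring constants above; `image` is a
# placeholder for a PIL image or array):
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained('facebook/regnet-y-040')
#   model = TFRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
#   inputs = processor(images=image, return_tensors='tf')
#   logits = model(**inputs).logits  # shape (batch_size, num_labels)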
| 3 | 0 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
    """simple docstring"""
    def __init__( self , p_stop=0.01 , max_length=1000 ):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class A_(unittest.TestCase ):
"""simple docstring"""
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ )
_lowerCamelCase : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A_ , A_ )
_lowerCamelCase : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A_ , A_ )
_lowerCamelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A_ , A_ )
_lowerCamelCase : int = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is very small.
_lowerCamelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Union[str, Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A_ , A_ )
_lowerCamelCase : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : str = [[], []]
self.check_batch_sampler_shards(A_ , A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
_lowerCamelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
_lowerCamelCase : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is very small.
_lowerCamelCase : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
_lowerCamelCase : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Dict = [[], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowerCamelCase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowerCamelCase : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowerCamelCase : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowerCamelCase : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is very small.
_lowerCamelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowerCamelCase : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
_lowerCamelCase : Tuple = [[], []]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
_lowerCamelCase : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
_lowerCamelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is very small.
_lowerCamelCase : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
_lowerCamelCase : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
_lowerCamelCase : Tuple = [[], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Tuple = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_lowerCamelCase : str = [BatchSamplerShard(A_ , 2 , A_ , even_batches=A_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
    def test_iterable_dataset_shard( self ):
        seed = 42
        dataset = RandomIterableDataset()
        # drop_last/split_batches flags reconstructed to cover all four combinations
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
    def test_skip_batch_sampler( self ):
        batch_sampler = BatchSampler(range(16 ) , batch_size=4 , drop_last=False )
        new_batch_sampler = SkipBatchSampler(batch_sampler , 2 )
        self.assertListEqual(list(new_batch_sampler ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_skip_data_loader( self ):
        dataloader = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_skip_first_batches( self ):
        dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
        new_dataloader = skip_first_batches(dataloader , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_end_of_dataloader( self ):
        dataloader = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    def test_end_of_dataloader_dispatcher( self ):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
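# Reading the expectations above: BatchSamplerShard deals batches round-robin
# across the two processes (shard 0 gets batches 0, 2, 4, ..., shard 1 gets
# batches 1, 3, 5, ...). With even_batches=True the tail is padded by cycling
# back to the start of the dataset so every process sees the same number of
# batches; with even_batches=False the shorter shard is simply left shorter.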
| 437 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig ):
    model_type = "perceiver"
def __init__( self , A_=256 , A_=1280 , A_=768 , A_=1 , A_=26 , A_=8 , A_=8 , A_=None , A_=None , A_="kv" , A_=1 , A_=1 , A_="gelu" , A_=0.1 , A_=0.02 , A_=1e-12 , A_=True , A_=262 , A_=2048 , A_=56 , A_=[368, 496] , A_=16 , A_=1920 , A_=16 , A_=[1, 16, 224, 224] , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = num_latents
UpperCamelCase = d_latents
UpperCamelCase = d_model
UpperCamelCase = num_blocks
UpperCamelCase = num_self_attends_per_block
UpperCamelCase = num_self_attention_heads
UpperCamelCase = num_cross_attention_heads
UpperCamelCase = qk_channels
UpperCamelCase = v_channels
UpperCamelCase = cross_attention_shape_for_attention
UpperCamelCase = self_attention_widening_factor
UpperCamelCase = cross_attention_widening_factor
UpperCamelCase = hidden_act
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_query_residual
# masked language modeling attributes
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
# image classification attributes
UpperCamelCase = image_size
# flow attributes
UpperCamelCase = train_size
# multimodal autoencoding attributes
UpperCamelCase = num_frames
UpperCamelCase = audio_samples_per_frame
UpperCamelCase = samples_per_patch
UpperCamelCase = output_shape
class PerceiverOnnxConfig(OnnxConfig ):
    @property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
    @property
    def atol_for_validation( self )-> float:
'''simple docstring'''
return 1e-4
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , )-> Mapping[str, Any]:
        '''simple docstring'''
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a'] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
            return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
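# Note on the branching above (a sketch of its behaviour): Perceiver is
# modality-agnostic, so the ONNX dummy inputs rename the tokenizer's
# 'input_ids' (or the image processor's 'pixel_values') to the generic
# 'inputs' key that the `inputs` property declares.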
| 3 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir ):
    """simple docstring"""
    lock1 = FileLock(str(tmpdir / "foo.lock" ) )
    lock2 = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        # moved out of the raises block so the timing is actually checked
        assert time.time() - _start > timeout
def test_long_path(tmpdir ):
    """simple docstring"""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith(".lock" )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 255
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
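# How this works (a sketch): FileLock serialises access through an OS-level lock
# held on the ".lock" file, so the second handle times out while the first holds
# it; the second test checks that over-long lock filenames are truncated to a
# filesystem-safe length.
#
#   with FileLock('resource.txt.lock'):   # hypothetical path
#       ...  # critical section: safe to read/write resource.txt here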
| 611 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig ):
    model_type = "mctct"
def __init__( self , A_=8065 , A_=1536 , A_=36 , A_=6144 , A_=4 , A_=384 , A_=920 , A_=1e-5 , A_=0.3 , A_="relu" , A_=0.02 , A_=0.3 , A_=0.3 , A_=1 , A_=0 , A_=2 , A_=1 , A_=0.3 , A_=1 , A_=(7,) , A_=(3,) , A_=80 , A_=1 , A_=None , A_="sum" , A_=False , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = num_attention_heads
UpperCamelCase = attention_head_dim
UpperCamelCase = max_position_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = layerdrop
UpperCamelCase = hidden_act
UpperCamelCase = initializer_range
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = conv_glu_dim
UpperCamelCase = conv_dropout
UpperCamelCase = num_conv_layers
UpperCamelCase = input_feat_per_channel
UpperCamelCase = input_channels
UpperCamelCase = conv_channels
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# prevents config testing fail with exporting to json
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 3 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig ):
    model_type = "unispeech-sat"
def __init__( self : str , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : Tuple=768 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[int]=0.0_2 , __lowerCamelCase : Optional[Any]=1E-5 , __lowerCamelCase : str="group" , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __lowerCamelCase : Dict=(5, 2, 2, 2, 2, 2, 2) , __lowerCamelCase : Dict=(10, 3, 3, 3, 3, 2, 2) , __lowerCamelCase : Any=False , __lowerCamelCase : List[str]=128 , __lowerCamelCase : Any=16 , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=0.0_5 , __lowerCamelCase : Dict=10 , __lowerCamelCase : str=2 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Dict=0 , __lowerCamelCase : Any=320 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=100 , __lowerCamelCase : Tuple=256 , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Dict="mean" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Any=256 , __lowerCamelCase : Dict=(512, 512, 512, 512, 1500) , __lowerCamelCase : Optional[int]=(5, 3, 3, 1, 1) , __lowerCamelCase : int=(1, 2, 3, 1, 1) , __lowerCamelCase : Any=512 , __lowerCamelCase : Dict=0 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : int=2 , __lowerCamelCase : str=504 , **__lowerCamelCase : Optional[Any] , ):
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
snake_case__ : Tuple = hidden_size
snake_case__ : Dict = feat_extract_norm
snake_case__ : List[str] = feat_extract_activation
snake_case__ : Optional[Any] = list(A_ )
snake_case__ : Optional[int] = list(A_ )
snake_case__ : Optional[Any] = list(A_ )
snake_case__ : Dict = conv_bias
snake_case__ : str = num_conv_pos_embeddings
snake_case__ : Optional[Any] = num_conv_pos_embedding_groups
snake_case__ : Any = len(self.conv_dim )
snake_case__ : Optional[int] = num_hidden_layers
snake_case__ : List[str] = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : Any = num_attention_heads
snake_case__ : Tuple = hidden_dropout
snake_case__ : Any = attention_dropout
snake_case__ : Optional[int] = activation_dropout
snake_case__ : Tuple = feat_proj_dropout
snake_case__ : Any = final_dropout
snake_case__ : Tuple = layerdrop
snake_case__ : Tuple = layer_norm_eps
snake_case__ : str = initializer_range
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[Any] = num_clusters
snake_case__ : int = do_stable_layer_norm
snake_case__ : int = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ : Optional[Any] = apply_spec_augment
snake_case__ : List[Any] = mask_time_prob
snake_case__ : Optional[int] = mask_time_length
snake_case__ : List[str] = mask_time_min_masks
snake_case__ : Optional[int] = mask_feature_prob
snake_case__ : Any = mask_feature_length
snake_case__ : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case__ : List[str] = num_codevectors_per_group
snake_case__ : Tuple = num_codevector_groups
snake_case__ : List[str] = contrastive_logits_temperature
snake_case__ : Any = feat_quantizer_dropout
snake_case__ : Tuple = num_negatives
snake_case__ : Optional[int] = codevector_dim
snake_case__ : Tuple = proj_codevector_dim
snake_case__ : Dict = diversity_loss_weight
# ctc loss
snake_case__ : int = ctc_loss_reduction
snake_case__ : Tuple = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ : Union[str, Any] = list(A_ )
snake_case__ : Union[str, Any] = list(A_ )
snake_case__ : int = list(A_ )
snake_case__ : List[Any] = xvector_output_dim
@property
def _lowerCAmelCase ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
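# Usage sketch (added for illustration, not part of the original module; the
# relative imports above mean this only runs inside the installed package):
#
#   config = UniSpeechSatConfig()
#   config.inputs_to_logits_ratio   # 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320
#   config.num_feat_extract_layers  # 7
#
# i.e. with the default conv strides the feature extractor emits one frame
# (and hence one logit) per 320 input audio samples.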
| 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
    MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
    MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
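# Illustration (added, not in the original file): how the `_LazyModule`
# registration above behaves at import time. Sketch of typical caller code,
# assuming an installed `transformers` package:
#
#   from transformers.models.mt5 import MT5Config   # cheap: config only
#   from transformers.models.mt5 import MT5Model    # torch code loads here
#
# Heavy framework-specific submodules are resolved only on first attribute
# access, which keeps the top-level import fast when torch/TF/flax are absent.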
| 3 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""cls_token""": """<s>"""}
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map)
__UpperCamelCase :List[Any] = '''lower newer'''
__UpperCamelCase :List[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCamelCase :Union[str, Any] = tokenizer.tokenize(A_) # , add_prefix_space=True)
self.assertListEqual(A_ , A_)
__UpperCamelCase :Optional[Any] = tokens + [tokenizer.unk_token]
__UpperCamelCase :List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_) , A_)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=A_) , [0, 31_414, 232, 328, 2])
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=A_) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :List[str] = self.tokenizer_class.from_pretrained('''roberta-base''')
__UpperCamelCase :str = tokenizer.encode('''sequence builders''' , add_special_tokens=A_)
__UpperCamelCase :List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A_)
__UpperCamelCase :List[str] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=A_ , add_prefix_space=A_)
__UpperCamelCase :Dict = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=A_ , add_prefix_space=A_)
__UpperCamelCase :List[str] = tokenizer.build_inputs_with_special_tokens(A_)
__UpperCamelCase :Any = tokenizer.build_inputs_with_special_tokens(A_ , A_)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :str = self.get_tokenizer()
__UpperCamelCase :Tuple = '''Encode this sequence.'''
__UpperCamelCase :str = tokenizer.byte_encoder[''' '''.encode('''utf-8''')[0]]
# Testing encoder arguments
__UpperCamelCase :List[str] = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_)
__UpperCamelCase :Tuple = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertNotEqual(A_ , A_)
__UpperCamelCase :int = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_)
__UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(A_ , A_)
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''})
__UpperCamelCase :int = tokenizer.encode(A_ , add_special_tokens=A_)
__UpperCamelCase :Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1])[0]
self.assertNotEqual(A_ , A_)
# Testing spaces after special tokens
__UpperCamelCase :Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(A_ , lstrip=A_ , rstrip=A_)}) # mask token has a left space
__UpperCamelCase :str = tokenizer.convert_tokens_to_ids(A_)
__UpperCamelCase :int = '''Encode <mask> sequence'''
__UpperCamelCase :Optional[int] = '''Encode <mask>sequence'''
__UpperCamelCase :Any = tokenizer.encode(A_)
__UpperCamelCase :List[str] = encoded.index(A_)
__UpperCamelCase :Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertEqual(A_ , A_)
__UpperCamelCase :int = tokenizer.encode(A_)
__UpperCamelCase :Optional[Any] = encoded.index(A_)
__UpperCamelCase :Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertNotEqual(A_ , A_)
def UpperCamelCase__ ( self) -> Any:
pass
def UpperCamelCase__ ( self) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
__UpperCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_)
__UpperCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained(A_ , **A_)
__UpperCamelCase :Optional[int] = '''A, <mask> AllenNLP sentence.'''
__UpperCamelCase :int = tokenizer_r.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_)
__UpperCamelCase :Optional[Any] = tokenizer_p.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
__UpperCamelCase :List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
__UpperCamelCase :Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
self.assertSequenceEqual(
A_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
self.assertSequenceEqual(
A_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
def UpperCamelCase__ ( self) -> Optional[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2):
__UpperCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_)
__UpperCamelCase :Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
__UpperCamelCase :Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , A_)
self.assertEqual(post_processor_state['''add_prefix_space'''] , A_)
self.assertEqual(post_processor_state['''trim_offsets'''] , A_)
def UpperCamelCase__ ( self) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
__UpperCamelCase :Tuple = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCamelCase :Optional[Any] = f"""{text_of_1_token} {text_of_1_token}"""
__UpperCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_)
__UpperCamelCase :Optional[int] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_)
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_)))
self.assertEqual(
encoding.offset_mapping[1] , (len(A_) + 1, len(A_) + 1 + len(A_)) , )
__UpperCamelCase :int = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_)
__UpperCamelCase :str = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_)
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_)))
self.assertEqual(
encoding.offset_mapping[1] , (len(A_) + 1, len(A_) + 1 + len(A_)) , )
__UpperCamelCase :int = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_)
__UpperCamelCase :List[Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_)
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_)))
self.assertEqual(
encoding.offset_mapping[1] , (len(A_), len(A_) + 1 + len(A_)) , )
__UpperCamelCase :str = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_)
__UpperCamelCase :Tuple = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_)
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_)))
self.assertEqual(
encoding.offset_mapping[1] , (len(A_), len(A_) + 1 + len(A_)) , )
            text = f""" {text_of_1_token}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCamelCase :str = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_)
__UpperCamelCase :str = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A_)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_) + 1, 1 + len(A_) + 1 + len(A_)) , )
__UpperCamelCase :Optional[int] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_)
__UpperCamelCase :Optional[int] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_), 1 + len(A_) + 1 + len(A_)) , )
__UpperCamelCase :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_)
__UpperCamelCase :Tuple = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_), 1 + len(A_) + 1 + len(A_)) , )
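# Side note (added, not in the original tests): the '\u0120' ('Ġ') prefix used
# in the toy vocab above is byte-level BPE's printable stand-in for a leading
# space; unprintable bytes are shifted into a printable Unicode range.
def _show_byte_level_space() -> None:
    # byte 0x20 (space) maps to chr(0x20 + 0x100) == '\u0120' in GPT-2's
    # bytes_to_unicode table, so " lower" starts with a "\u0120l..." token
    # while "lower" does not
    assert chr(ord(" ") + 0x100) == "\u0120"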
| 167 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@is_flaky()
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
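# Added note (illustrative only, not the library's actual helper): the
# `(42, 84) -> {'height': 84, 'width': 42}` assertion above reflects that
# older Donut configs stored `size` as (width, height); a tuple is therefore
# flipped when normalized to a height/width dict. A minimal sketch:
def _normalize_size(size) -> dict:
    if isinstance(size, (tuple, list)):
        width, height = size  # legacy (width, height) order
        return {'height': height, 'width': width}
    if isinstance(size, int):
        return {'height': size, 'width': size}
    return dict(size)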
| 3 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
def UpperCAmelCase__ ( self : Union[str, Any]):
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
lowerCAmelCase_ : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowerCAmelCase_ : List[str] = {}
for k, element in self._loader_batch_data.items():
if isinstance(A_ , A_):
# Convert ModelOutput to tuple first
lowerCAmelCase_ : Tuple = element.to_tuple()
if isinstance(element[0] , torch.Tensor):
lowerCAmelCase_ : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
lowerCAmelCase_ : Dict = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(A_ , A_):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor):
lowerCAmelCase_ : List[Any] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
lowerCAmelCase_ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
lowerCAmelCase_ : Any = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
lowerCAmelCase_ : List[str] = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
lowerCAmelCase_ : List[Any] = np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
lowerCAmelCase_ : Dict = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowerCAmelCase_ : Dict = self._loader_batch_data.__class__(A_)
self._loader_batch_index += 1
return result
def UpperCAmelCase__ ( self : List[str]):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowerCAmelCase_ : Tuple = next(self.iterator)
lowerCAmelCase_ : Dict = self.infer(A_ , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(A_ , torch.Tensor):
lowerCAmelCase_ : Union[str, Any] = processed
else:
lowerCAmelCase_ : int = list(processed.keys())[0]
lowerCAmelCase_ : Tuple = processed[key]
if isinstance(A_ , A_):
lowerCAmelCase_ : str = len(A_)
else:
lowerCAmelCase_ : Tuple = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCAmelCase_ : Tuple = observed_batch_size
# Setting internal index to unwrap the batch
lowerCAmelCase_ : Any = processed
lowerCAmelCase_ : Union[str, Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Dict = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
lowerCAmelCase_ : List[Any] = self.loader_batch_item()
lowerCAmelCase_ : Dict = item.pop('''is_last''')
accumulator.append(A_)
if is_last:
return accumulator
while not is_last:
lowerCAmelCase_ : Union[str, Any] = self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(A_ , torch.Tensor):
lowerCAmelCase_ : List[Any] = processed
else:
lowerCAmelCase_ : Tuple = list(processed.keys())[0]
lowerCAmelCase_ : List[str] = processed[key]
if isinstance(A_ , A_):
lowerCAmelCase_ : Union[str, Any] = len(A_)
else:
lowerCAmelCase_ : Union[str, Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCAmelCase_ : Any = observed_batch_size
lowerCAmelCase_ : Optional[int] = processed
lowerCAmelCase_ : str = 0
while self._loader_batch_index < self.loader_batch_size:
lowerCAmelCase_ : Optional[int] = self.loader_batch_item()
lowerCAmelCase_ : List[Any] = item.pop('''is_last''')
accumulator.append(A_)
if is_last:
return accumulator
else:
lowerCAmelCase_ : Optional[Any] = processed
lowerCAmelCase_ : Optional[Any] = item.pop('''is_last''')
accumulator.append(A_)
return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 171 |
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
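# Cross-check (added): both implementations should agree with the standard
# library, up to ordering of the n! results.
import itertools


def _check_permutations(nums: list[int]) -> None:
    expected = sorted(itertools.permutations(nums))
    assert sorted(map(tuple, permute(list(nums)))) == expected
    assert sorted(map(tuple, permute2(list(nums)))) == expected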
| 3 | 0 |
def get_data(source_data: list) -> list:
    # transpose the rows of source data into per-attribute columns of floats
    data_lists: list = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f'Invalid weight of {weight:f} provided'
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list) -> list:
    final_scores: list = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
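# Example (added, with made-up numbers): three options scored on two columns;
# weight 0 means "lower is better" and weight 1 means "higher is better".
# Each row gains a trailing aggregate score.
if __name__ == '__main__':
    vehicles = [[20, 60], [40, 100], [60, 80]]  # e.g. [price, range]
    print(procentual_proximity(vehicles, [0, 1]))
    # [[20, 60, 1.0], [40, 100, 1.5], [60, 80, 0.5]]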
| 345 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True):
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase : Any = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
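# Added note: `get_distance` runs the classic escape-time iteration
# z_{n+1} = z_n^2 + c with z_0 = c = x + iy; the test `a * a + b * b > 4`
# is |z| > 2, the standard escape radius. Equivalent complex-number sketch:
def _escape_steps(c: complex, max_step: int = 50) -> int:
    z = c
    for step in range(max_step):
        if abs(z) > 2:
            break
        z = z * z + c
    return step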
| 3 | 0 |
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
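# Worked example (added): 25 = 0b11001 and 32 = 0b100000; the shorter operand
# is zero-padded to the longer width before the bitwise comparison.
def _demo_binary_xor() -> None:
    assert binary_xor(25, 32) == '0b111001'  # 011001 ^ 100000
    assert binary_xor(37, 50) == '0b010111'  # 100101 ^ 110010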
| 17 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 523 |
'''simple docstring'''
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
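# Performance note (added): `queue.pop(0)` on a Python list is O(n); for large
# graphs a deque gives O(1) pops from the front. A drop-in sketch of the same
# path-tracking traversal:
from collections import deque


def bfs_shortest_path_deque(graph: dict, start, goal) -> list:
    if start == goal:
        return [start]
    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        for neighbour in graph[path[-1]]:
            if neighbour == goal:
                return path + [neighbour]
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []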
| 3 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'camembert-base': 512,
}
SPIECE_UNDERLINE = '▁'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        """simple docstring"""
        # the mask token needs lstrip so it behaves like an ordinary word preceded by a space
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
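# Usage sketch (added): saving the slow (SentencePiece) vocabulary only works
# when the fast tokenizer was built from a sentencepiece model file. Assumes
# hub access for `from_pretrained`:
#
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   if tokenizer.can_save_slow_tokenizer:
#       tokenizer.save_vocabulary("./camembert-vocab")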
| 376 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cva.imshow('Output-Image', self.img)
        cva.imshow('Input-Image', self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
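# Added note: the mapping built in `stretch` is histogram equalization, i.e.
# each input level is sent to (L - 1) * CDF(level). A compact numpy-only
# sketch of the same lookup-table construction (assumes a uint8 grayscale
# array `img`):
#
#   hist, _ = np.histogram(img.ravel(), 256, [0, 256])
#   cdf = hist.cumsum() / hist.sum()
#   lut = np.round(255 * cdf).astype(np.uint8)
#   equalized = lut[img]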
| 3 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 286 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = """unispeech-sat"""

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        '''simple docstring'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
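# Added sketch: the conv-layer consistency check above fires whenever the
# three conv tuples disagree in length, e.g. (illustrative values):
#
#   UniSpeechSatConfig(conv_dim=(512, 512), conv_stride=(5, 2, 2), conv_kernel=(10, 3, 3))
#   # ValueError: Configuration for convolutional layers is incorrect. ...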
| 3 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
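# Usage sketch (added, illustrative only; the relative imports above mean this
# runs inside the installed package): the dynamic-axes mapping marks batch and
# sequence dimensions as variable-sized for ONNX export.
#
#   onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task='default')
#   onnx_config.inputs
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])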
| 297 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices)
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = BeitModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.num_labels
UpperCamelCase = BeitForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": BeitModel,
            """image-classification""": BeitForImageClassification,
            """image-segmentation""": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A_ ), BeitForMaskedImageModeling]:
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase = False
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(A_ )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=A_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BeitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A_( ):
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).pixel_values.to(A_ )
# prepare bool_masked_pos
UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(A_ )
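# 196 = (224 / 16) ** 2: one boolean per 16x16 patch of the 224px input, so an
# all-True mask asks the model to reconstruct every visual token (the patch and
# image sizes are an assumption read off the checkpoint name "beit-base-patch16-224-pt22k")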
# forward pass
with torch.no_grad():
UpperCamelCase = model(pixel_values=A_ , bool_masked_pos=A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(A_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1e-2 ) )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 21841) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
UpperCamelCase = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=A_ , )
else:
UpperCamelCase = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits.detach().cpu()
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(500, 300)] )
UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , A_ )
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ )
UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , A_ )
| 3 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def UpperCAmelCase_ ( __a : Any , __a : List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = int(__a )
assert noofclusters < len(__a )
# Find out the dimensionality
_lowerCamelCase : Tuple = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCamelCase : Tuple = list(range(len(__a ) ) )
shuffle(__a )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCamelCase : Optional[int] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCamelCase : Optional[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCamelCase : Tuple = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(__a )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCamelCase : Any = tf.placeholder('float64' , [dim] )
_lowerCamelCase : str = []
for centroid in centroids:
cent_assigns.append(tf.assign(__a , __a ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCamelCase : str = [tf.Variable(0 ) for i in range(len(__a ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCamelCase : Any = tf.placeholder('int32' )
_lowerCamelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(__a , __a ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCamelCase : Optional[int] = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCamelCase : Tuple = tf.reduce_mean(__a , 0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCamelCase : Any = tf.placeholder('float' , [dim] )
_lowerCamelCase : List[str] = tf.placeholder('float' , [dim] )
        _lowerCamelCase : Optional[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(__a , __a ) , 2 ) ) )  # tf.sub was renamed tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCamelCase : Union[str, Any] = tf.placeholder('float' , [noofclusters] )
_lowerCamelCase : int = tf.argmin(__a , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        _lowerCamelCase : int = tf.global_variables_initializer()  # initialize_all_variables() is the deprecated pre-1.0 spelling
# Initialize all variables
sess.run(__a )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCamelCase : str = 1_00
for _ in range(__a ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(__a ) ):
_lowerCamelCase : Optional[int] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCamelCase : Tuple = [
                    sess.run(__a , feed_dict={va: vect, vb: sess.run(centroid )} )  # feed the two placeholders distinctly; the duplicated `va` key silently dropped the first feed
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCamelCase : Tuple = sess.run(
__a , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(__a ):
# Collect all the vectors assigned to this cluster
_lowerCamelCase : Tuple = [
vectors[i]
for i in range(len(__a ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCamelCase : Optional[Any] = sess.run(
__a , feed_dict={mean_input: array(__a )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCamelCase : int = sess.run(__a )
_lowerCamelCase : Optional[int] = sess.run(__a )
return centroids, assignments
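

# A minimal usage sketch of the clustering routine above (the sample data is
# hypothetical; assumes a TF 1.x runtime, and that the obfuscated
# `_lowerCamelCase` assignment targets bind the names their later references use):
if __name__ == "__main__":
    sample_vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 9.5]])
    centroids, assignments = UpperCAmelCase_(sample_vectors, 2)
    print(centroids, assignments)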
| 437 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """generated"""
def __init__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
super().__init__(*A_ , **A_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , **A_ , )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = {}
if truncation is not None:
UpperCamelCase = truncation
UpperCamelCase = generate_kwargs
UpperCamelCase = {}
if return_tensors is not None and return_type is None:
UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCamelCase = self.tokenizer.encode(A_ , add_special_tokens=A_ )
if len(A_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
return True
def UpperCAmelCase_ ( self , *A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , A_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
UpperCamelCase = ([prefix + arg for arg in args[0]],)
UpperCamelCase = True
elif isinstance(args[0] , A_ ):
UpperCamelCase = (prefix + args[0],)
UpperCamelCase = False
else:
raise ValueError(
F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`''' )
UpperCamelCase = self.tokenizer(*A_ , padding=A_ , truncation=A_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = super().__call__(*A_ , **A_ )
if (
isinstance(args[0] , A_ )
and all(isinstance(A_ , A_ ) for el in args[0] )
and all(len(A_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase_ ( self , A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self._parse_and_tokenize(A_ , truncation=A_ , **A_ )
return inputs
def UpperCAmelCase_ ( self , A_ , **A_ )-> int:
'''simple docstring'''
if self.framework == "pt":
UpperCamelCase , UpperCamelCase = model_inputs['input_ids'].shape
elif self.framework == "tf":
UpperCamelCase , UpperCamelCase = tf.shape(model_inputs['input_ids'] ).numpy()
UpperCamelCase = generate_kwargs.get('min_length' , self.model.config.min_length )
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(A_ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
UpperCamelCase = self.model.generate(**A_ , **A_ )
UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
UpperCamelCase = output_ids.reshape(A_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCamelCase = tf.reshape(A_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase_ ( self , A_ , A_=ReturnType.TEXT , A_=False )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCamelCase = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
UpperCamelCase = {
F'''{self.return_name}_text''': self.tokenizer.decode(
A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
}
records.append(A_ )
return records
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """summary"""
def __call__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """translation"""
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCAmelCase_ ( self , *A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , A_=None , A_=None )-> Dict:
'''simple docstring'''
if getattr(self.tokenizer , '_build_translation_inputs' , A_ ):
return self.tokenizer._build_translation_inputs(
*A_ , return_tensors=self.framework , truncation=A_ , src_lang=A_ , tgt_lang=A_ )
else:
return super()._parse_and_tokenize(*A_ , truncation=A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , **A_ )-> str:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = super()._sanitize_parameters(**A_ )
if src_lang is not None:
UpperCamelCase = src_lang
if tgt_lang is not None:
UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCamelCase = kwargs.get('task' , self.task )
UpperCamelCase = task.split('_' )
if task and len(A_ ) == 4:
# translation, XX, to YY
UpperCamelCase = items[1]
UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
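# A minimal usage sketch of the task classes above (hedged: judging by their
# `return_name` attributes they are the text2text/summarization/translation
# pipelines, and "t5-small" is assumed to be an available seq2seq checkpoint):
# from transformers import pipeline
# summarizer = pipeline("summarization", model="t5-small")
# print(summarizer("A long article ...", min_length=5, max_length=20))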
| 3 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
UpperCamelCase_ = _symbol_database.Default()
UpperCamelCase_ = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
UpperCamelCase_ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
UpperCamelCase_ = None
UpperCamelCase_ = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
UpperCamelCase_ = 4_5
UpperCamelCase_ = 1_5_8_1
UpperCamelCase_ = 1_5_1_7
UpperCamelCase_ = 1_5_7_0
UpperCamelCase_ = 1_5_8_4
UpperCamelCase_ = 1_7_9_3
UpperCamelCase_ = 1_7_9_5
UpperCamelCase_ = 1_9_1_6
UpperCamelCase_ = 1_8_6_4
UpperCamelCase_ = 1_9_0_5
UpperCamelCase_ = 1_9_1_9
UpperCamelCase_ = 2_4_2_9
UpperCamelCase_ = 2_2_0_8
UpperCamelCase_ = 2_4_1_8
UpperCamelCase_ = 2_3_2_3
UpperCamelCase_ = 2_4_0_7
# @@protoc_insertion_point(module_scope)
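# A minimal usage sketch (hedged): `_builder.BuildTopDescriptorsAndMessages`
# injects the generated classes into the globals() dict captured above, so a
# `ModelProto` message class should exist at module scope after import:
# m = ModelProto()
# m.ParseFromString(open("spiece.model", "rb").read())  # hypothetical model path
# print(len(m.pieces))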
| 611 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 0
lowerCAmelCase_ = False
lowerCAmelCase_ = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=A_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCamelCase = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fp16 )
UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , A_ )
@require_multi_gpu
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : Tuple = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : List[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : int = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : Dict = ''
lowerCAmelCase : Dict = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
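# A minimal sketch of the pattern exercised above (assuming the obfuscated base
# class is accelerate's `KwargsHandler`): `to_kwargs()` serializes only the
# dataclass fields that differ from their defaults, e.g.
# MockClass(a=2, b=True).to_kwargs() == {"a": 2, "b": True}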
| 3 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase_ ( unittest.TestCase ):
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : List[str] = mock.Mock()
snake_case__ : int = 500
snake_case__ : int = {}
snake_case__ : Tuple = HTTPError
snake_case__ : List[str] = {}
# Download this model to make sure it's in the cache.
snake_case__ : Dict = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
snake_case__ : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = mock.Mock()
snake_case__ : Dict = 500
snake_case__ : int = {}
snake_case__ : Union[str, Any] = HTTPError
snake_case__ : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ : str = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
snake_case__ : Any = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : Tuple ):
try:
snake_case__ : Any = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
snake_case__ : Optional[int] = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
snake_case__ : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _lowerCAmelCase ( self : Any ):
snake_case__ : List[str] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase_ ( unittest.TestCase ):
A_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _lowerCAmelCase ( cls : int ):
snake_case__ : Any = TOKEN
HfFolder.save_token(A_ )
@classmethod
def _lowerCAmelCase ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _lowerCAmelCase ( self : Dict ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[str] = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
snake_case__ : Union[str, Any] = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
snake_case__ : Union[str, Any] = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
snake_case__ : List[Any] = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _lowerCAmelCase ( self : int ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Dict = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
snake_case__ : Optional[int] = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
snake_case__ : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
snake_case__ : Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _lowerCAmelCase ( self : Any ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[str] = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
snake_case__ : List[Any] = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=A_ )
# Can't use isinstance here because the CustomTokenizer class comes from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[str] = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
snake_case__ : Any = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
snake_case__ : Dict = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
snake_case__ : Any = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=A_ )
# Can't use isinstance here because the CustomTokenizerFast class comes from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
snake_case__ : Any = AutoTokenizer.from_pretrained(
F"{USER}/test-dynamic-tokenizer" , use_fast=A_ , trust_remote_code=A_ )
# Can't use isinstance here because the CustomTokenizer class comes from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowercase_ ( unittest.TestCase ):
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : Optional[Any] = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _lowerCAmelCase ( self : str ):
snake_case__ : Optional[Any] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def _lowerCAmelCase ( self : str ):
snake_case__ : Dict = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Dict = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : int = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _lowerCAmelCase ( self : Union[str, Any] ):
snake_case__ : List[Any] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Union[str, Any] = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : Any = Trie()
snake_case__ : Any = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A_ , ['AB', 'C'] )
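# A minimal usage sketch of the tokenizer Trie exercised above (mirrors the
# add/split cases in these tests):
# t = Trie()
# t.add("[CLS]")
# t.add("extra_id_100")
# assert t.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]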
| 270 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None )-> Tuple:
'''simple docstring'''
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , )-> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0 to'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
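# Classifier-free guidance in log-space: interpolate `guidance_scale` times past
# the unconditional log-probs toward the text-conditioned ones, then renormalize
# with the logsumexp subtraction so the result is again a valid log-distribution.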
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def UpperCAmelCase_ ( self , A_ , A_ )-> torch.FloatTensor:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
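# A minimal usage sketch (hedged: judging by its registered modules, the class
# above is diffusers' VQDiffusionPipeline; the hub id below is the usual public
# checkpoint and is an assumption):
# pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
# image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]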
| 3 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = tmp_path / '''cache'''
__UpperCamelCase :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase :Optional[int] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = tmp_path / '''cache'''
__UpperCamelCase :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase :Optional[Any] = features.copy() if features else default_expected_features
__UpperCamelCase :List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase :Any = ParquetDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[str] = tmp_path / '''cache'''
__UpperCamelCase :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase :Union[str, Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Tuple = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Tuple = [parquet_path]
__UpperCamelCase :Tuple = tmp_path / '''cache'''
__UpperCamelCase :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase :Tuple = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
__UpperCamelCase :Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = tmp_path / '''cache'''
__UpperCamelCase :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase :Union[str, Any] = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :int = tmp_path / '''cache'''
__UpperCamelCase :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase :Optional[int] = features.copy() if features else default_expected_features
__UpperCamelCase :int = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase :Optional[int] = ParquetDatasetReader({'''train''': parquet_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if split:
__UpperCamelCase :Tuple = {split: parquet_path}
else:
__UpperCamelCase :List[str] = '''train'''
__UpperCamelCase :Tuple = {'''train''': parquet_path, '''test''': parquet_path}
__UpperCamelCase :Any = tmp_path / '''cache'''
__UpperCamelCase :List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCamelCase :List[str] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[str] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__UpperCamelCase :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__UpperCamelCase :List[Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = str(shared_datadir / '''test_image_rgb.jpg''' )
__UpperCamelCase :Dict = {'''image''': [image_path]}
__UpperCamelCase :List[Any] = Features({'''image''': Image()} )
__UpperCamelCase :Union[str, Any] = Dataset.from_dict(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__UpperCamelCase :Optional[Any] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__UpperCamelCase :Optional[int] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
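# Round-trip sketch (added illustration, not part of the original tests; the
# file names are hypothetical):
#   ds = ParquetDatasetReader({"train": "data.parquet"}, cache_dir="cache").read()
#   ParquetDatasetWriter(ds["train"], "out.parquet").write()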
| 167 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
def UpperCAmelCase__ ( self : Dict):
pass
    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
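# Usage sketch (added note, not part of the original file): these tests can be
# run in isolation with pytest, e.g.
#   pytest test_image_processing_donut.py -k "Donut"
# (the exact path depends on the repository layout).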
| 171 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
PIPELINE_INIT_ARGS, r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
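# Usage sketch (added illustration, not part of the original module; assumes a
# conversational checkpoint such as DialoGPT is available):
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("What's the weather like?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])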
| 3 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
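# Example (added illustration, not part of the original module):
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}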
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__",
        unk_token="__unk__", pad_token="__null__", **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub("(')", r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')

        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into a list of BPE tokens."""
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
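# Illustrative round trip (added sketch, not part of the original module; the
# vocab/merges file paths are hypothetical):
#   tok = BlenderbotSmallTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   ids = tok.convert_tokens_to_ids(tok._tokenize("hello world"))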
| 345 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(1_00_00):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"""https://google.com{link.get('href')}""")
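# Example invocation (added note, not part of the original script):
#   python google_search.py "python list comprehension"
# opens up to five of the top result links in the default browser.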
| 3 | 0 |
def binary_recursive(decimal: int) -> str:
    """Recursively build the binary digits of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return a '0b'-prefixed (optionally negative) binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
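if __name__ == "__main__":
    # Illustrative usage (added example, not part of the original module):
    for value in ("7", "-12", "0"):
        print(value, "->", main(value))  # e.g. 7 -> 0b111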
| 17 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Integrate y' = f(x, y) from x0 to x_end with step size h using the classic RK4 scheme."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
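if __name__ == "__main__":
    # Minimal usage sketch (added example, not part of the original module):
    # integrate dy/dx = y with y(0) = 1 over [0, 1]; the last value approximates e.
    ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.718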
| 3 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word, eos_token=target_dict.eos_word,
                word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0,
                do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    ) | 523 |
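# Example invocation (added note, not part of the original script; all paths are
# hypothetical):
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer \
#       --not_finetuned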
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 3 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
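# Sanity example (added illustration, not part of the original metric script):
#   import numpy as np
#   acc_and_f1(np.array([0, 1, 1]), np.array([0, 1, 0]))
#   -> {"accuracy": 0.666..., "f1": 0.666...}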
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 376 |
'''simple docstring'''
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Find the next greatest element to the right of each element
    by scanning all following elements, in O(n^2) time.
    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Same O(n^2) idea as next_greatest_element_slow(), but iterating
    with enumerate() and slicing instead of index bookkeeping.
    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    O(n) stack-based solution, scanning the array from the right.
    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
    print(
        'next_greatest_element_slow():',
        timeit('next_greatest_element_slow(arr)', setup=setup),
    )
    print(
        'next_greatest_element_fast():',
        timeit('next_greatest_element_fast(arr)', setup=setup),
    )
    print(
        ' next_greatest_element():',
        timeit('next_greatest_element(arr)', setup=setup),
    )
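# For reference (added note, not part of the original module): all three
# implementations return the same list for `arr`, which is exactly what the
# doctests above check against `expect`.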
| 3 | 0 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ['dtype']
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
    @property
    def compatibles(self):
        """Returns all schedulers that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
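# Quick note (added, not part of the original file): with the default
# max_beta=0.999 this reproduces the "squaredcos_cap_v2" (Glide cosine)
# schedule consumed by `CommonSchedulerState.create` below.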
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
    @classmethod
    def create(cls, scheduler):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}'
            )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
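# Quick reference (added note, not part of the original file): add_noise_common
# implements the standard DDPM forward process
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# and get_velocity_common the v-prediction target
#   v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0.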
| 286 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence, leaving the rest unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
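if __name__ == "__main__":
    # Quick check (added example, not part of the original module):
    assert capitalize("hello world") == "Hello world"
    assert capitalize("") == ""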
| 3 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[int] , __A : List[str] ):
snake_case__ : List[str] = constraints
# max # of steps required to fulfill a given constraint
snake_case__ : Any = max([c.seqlen for c in constraints] )
snake_case__ : Optional[int] = len(A_ )
snake_case__ : Any = False
self.init_state()
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[Any] = []
snake_case__ : Tuple = None
snake_case__ : Any = [constraint.copy(stateful=A_ ) for constraint in self.constraints]
def _lowercase ( self : Any ):
snake_case__ : Dict = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
snake_case__ : int = constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
else:
snake_case__ : List[str] = self.inprogress_constraint.advance()
if isinstance(A_ , A_ ):
token_list.append(A_ )
elif isinstance(A_ , A_ ):
token_list.extend(A_ )
if len(A_ ) == 0:
return None
else:
return token_list
def _lowercase ( self : int , __A : List[Any] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
snake_case__, snake_case__ : List[str] = self.add(A_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def _lowercase ( self : Any , __A : str ):
if not isinstance(A_ , A_ ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
snake_case__, snake_case__ : int = False, False
if self.completed:
snake_case__ : List[str] = True
snake_case__ : Any = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
snake_case__, snake_case__, snake_case__ : Optional[Any] = self.inprogress_constraint.update(A_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) )
snake_case__ : List[str] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
snake_case__ : str = None
if len(self.pending_constraints ) == 0:
# we're done!
snake_case__ : Any = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A_ ):
snake_case__, snake_case__, snake_case__ : List[Any] = pending_constraint.update(A_ )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(A_ )
snake_case__ : str = None
if not complete and stepped:
snake_case__ : Union[str, Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
snake_case__ : Optional[Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
snake_case__ : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def _lowercase ( self : int , __A : str=True ):
snake_case__ : Optional[int] = ConstraintListState(self.constraints ) # we never touch the self.constraints objects
# throughout this process, so they are still in their initialization state.
if stateful:
snake_case__ : int = [
constraint.copy(stateful=A_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
snake_case__ : Tuple = self.inprogress_constraint.copy(stateful=A_ )
snake_case__ : Tuple = [constraint.copy() for constraint in self.pending_constraints]
return new_state
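# --- Hedged usage sketch (added for illustration, not part of the original class).
# Assumes the public `transformers.generation` API, where the class above is
# `ConstraintListState` and phrase constraints are `PhrasalConstraint`; the token
# ids are arbitrary. `add` steps/completes constraints one token at a time, and
# `advance` lists the token ids that would make progress next.
#
#   from transformers.generation.beam_constraints import (
#       ConstraintListState,
#       PhrasalConstraint,
#   )
#
#   state = ConstraintListState([PhrasalConstraint([5, 6, 7])])
#   for token in (5, 6):
#       complete, stepped = state.add(token)  # (False, True) on each step
#   print(state.advance())  # [7] -- only token 7 continues the pending phrase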
| 297 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : Dict = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : str = 'RegNetConfig'
# Base docstring
lowerCAmelCase : str = 'facebook/regnet-y-040'
lowerCAmelCase : Dict = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Dict = 'facebook/regnet-y-040'
lowerCAmelCase : int = 'tabby, tabby cat'
lowerCAmelCase : int = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.convolution(self.padding(A_ ) )
UpperCamelCase = self.normalization(A_ )
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config.num_channels
UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) )
UpperCamelCase = self.embedder(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(A_ ) , training=A_ )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
UpperCamelCase = [
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.pooler(A_ )
for layer_module in self.attention:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(A_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
lowerCAmelCase_ = RegNetConfig
def __init__( self , A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config
UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' )
UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(A_ , training=A_ )
UpperCamelCase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
# Change to NCHW output format to have uniformity in the modules
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = RegNetConfig
lowerCAmelCase_ = """regnet"""
lowerCAmelCase_ = """pixel_values"""
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : str = r'\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase : List[str] = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
# classification head
UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier[0](A_ )
UpperCamelCase = self.classifier[1](A_ )
UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
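# --- Hedged usage sketch (illustration only; assumes the public
# `TFRegNetForImageClassification` / `AutoImageProcessor` API and the
# `facebook/regnet-y-040` checkpoint named in the docstring constants above).
if __name__ == "__main__":
    import requests
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    logits = model(**processor(image, return_tensors="tf")).logits
    # should print "tabby, tabby cat" for the COCO cats image, matching
    # _IMAGE_CLASS_EXPECTED_OUTPUT above
    print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])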
| 3 | 0 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_(snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self , A , A = None , A = None ):
super().__init__()
_lowerCamelCase : List[Any] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_lowerCamelCase : int = torch.zeros(A_ , A_ )
else:
_lowerCamelCase : int = None
_lowerCamelCase : str = torch.nn.Parameter(A_ )
class A_(snake_case_ ):
"""simple docstring"""
a_ : int = 42
a_ : str = 42
a_ : int = 42
a_ : int = 42
a_ : Optional[Any] = 42
a_ : int = 42
def __init__( self , A , A , A , A , A , A , ):
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def _lowerCAmelCase ( self , A , A , A ):
_lowerCamelCase : Any = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
_lowerCamelCase : Optional[int] = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_lowerCamelCase : Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCamelCase : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
_lowerCamelCase : List[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCamelCase : Dict = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_lowerCamelCase : Optional[int] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
_lowerCamelCase : List[str] = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_lowerCamelCase : Tuple = self.learned_classifier_free_sampling_embeddings.embeddings
_lowerCamelCase : List[Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
_lowerCamelCase : Optional[Any] = [''] * batch_size
_lowerCamelCase : Tuple = text_input_ids.shape[-1]
_lowerCamelCase : int = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
_lowerCamelCase : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_lowerCamelCase : Tuple = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCamelCase : List[str] = negative_prompt_embeds.shape[1]
_lowerCamelCase : Any = negative_prompt_embeds.repeat(1 , A_ , 1 )
_lowerCamelCase : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A , A = 100 , A = 5.0 , A = 1.0 , A = 1 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , ):
if isinstance(A_ , A_ ):
_lowerCamelCase : List[str] = 1
elif isinstance(A_ , A_ ):
_lowerCamelCase : Union[str, Any] = len(A_ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(A_ )}" )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
_lowerCamelCase : List[str] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(A_ )}." )
# get the initial completely masked latents unless the user supplied it
_lowerCamelCase : List[Any] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_lowerCamelCase : Optional[int] = self.transformer.num_vector_embeds - 1
_lowerCamelCase : Dict = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F" {self.transformer.num_vector_embeds - 1} (inclusive)." )
_lowerCamelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps.to(self.device )
_lowerCamelCase : Optional[Any] = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
_lowerCamelCase : Dict = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_lowerCamelCase : Optional[Any] = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : List[Any] = model_output.chunk(2 )
_lowerCamelCase : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
_lowerCamelCase : List[str] = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
_lowerCamelCase : List[Any] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Union[str, Any] = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
_lowerCamelCase : Union[str, Any] = self.vqvae.config.vq_embed_dim
_lowerCamelCase : int = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_lowerCamelCase : List[str] = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
_lowerCamelCase : Optional[int] = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
_lowerCamelCase : str = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def _lowerCAmelCase ( self , A , A ):
_lowerCamelCase , _lowerCamelCase : Any = torch.sort(A_ , 1 , descending=A_ )
_lowerCamelCase : List[Any] = torch.exp(A_ )
_lowerCamelCase : List[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_lowerCamelCase : Tuple = torch.full_like(keep_mask[:, 0:1, :] , A_ )
_lowerCamelCase : List[str] = torch.cat((all_true, keep_mask) , dim=1 )
_lowerCamelCase : Optional[Any] = keep_mask[:, :-1, :]
_lowerCamelCase : Optional[Any] = keep_mask.gather(1 , indices.argsort(1 ) )
_lowerCamelCase : Dict = log_p_x_0.clone()
_lowerCamelCase : Optional[int] = -torch.inf # -inf = log(0)
return rv
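# --- Hedged usage sketch (illustration only; assumes the public diffusers
# `VQDiffusionPipeline` API and the released `microsoft/vq-diffusion-ithq`
# checkpoint -- not part of the original file; the truncation_rate value is
# illustrative).
if __name__ == "__main__":
    import torch
    from diffusers import VQDiffusionPipeline

    pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    # `truncation_rate` feeds the `truncate` method above: probability mass
    # outside the smallest set reaching the rate is zeroed out per step.
    image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]
    image.save("teddy_bear.png")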
| 437 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """perceiver"""
def __init__( self , A_=256 , A_=1280 , A_=768 , A_=1 , A_=26 , A_=8 , A_=8 , A_=None , A_=None , A_="kv" , A_=1 , A_=1 , A_="gelu" , A_=0.1 , A_=0.02 , A_=1e-12 , A_=True , A_=262 , A_=2048 , A_=56 , A_=[368, 496] , A_=16 , A_=1920 , A_=16 , A_=[1, 16, 224, 224] , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = num_latents
UpperCamelCase = d_latents
UpperCamelCase = d_model
UpperCamelCase = num_blocks
UpperCamelCase = num_self_attends_per_block
UpperCamelCase = num_self_attention_heads
UpperCamelCase = num_cross_attention_heads
UpperCamelCase = qk_channels
UpperCamelCase = v_channels
UpperCamelCase = cross_attention_shape_for_attention
UpperCamelCase = self_attention_widening_factor
UpperCamelCase = cross_attention_widening_factor
UpperCamelCase = hidden_act
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_query_residual
# masked language modeling attributes
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
# image classification attributes
UpperCamelCase = image_size
# flow attributes
UpperCamelCase = train_size
# multimodal autoencoding attributes
UpperCamelCase = num_frames
UpperCamelCase = audio_samples_per_frame
UpperCamelCase = samples_per_patch
UpperCamelCase = output_shape
class SCREAMING_SNAKE_CASE__ ( snake_case_):
@property
def UpperCAmelCase_ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase_ ( self )-> float:
'''simple docstring'''
return 1e-4
def UpperCAmelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = -1 , A_ = False , A_ = None , A_ = 3 , A_ = 40 , A_ = 40 , )-> Mapping[str, Any]:
'''simple docstring'''
if isinstance(A_ , A_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase = preprocessor.num_special_tokens_to_add(A_ )
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase = [' '.join(['a'] ) * seq_length] * batch_size
UpperCamelCase = dict(preprocessor(A_ , return_tensors=A_ ) )
UpperCamelCase = inputs.pop('input_ids' )
return inputs
elif isinstance(A_ , A_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(A_ , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCamelCase = self._generate_dummy_images(A_ , A_ , A_ , A_ )
UpperCamelCase = dict(preprocessor(images=A_ , return_tensors=A_ ) )
UpperCamelCase = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
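# --- Hedged mini-example (illustration; assumes the public names
# `PerceiverConfig` / `PerceiverTokenizer` / `PerceiverOnnxConfig` for the
# classes defined above). The ONNX config builds text dummies when handed a
# tokenizer and image dummies when handed an image processor.
if __name__ == "__main__":
    from transformers import PerceiverConfig, PerceiverTokenizer

    onnx_config = PerceiverOnnxConfig(PerceiverConfig())
    tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
    print(sorted(dummy))  # ['attention_mask', 'inputs']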
| 3 | 0 |
from string import ascii_uppercase
UpperCamelCase_ = {char: i for i, char in enumerate(ascii_uppercase)}
UpperCamelCase_ = dict(enumerate(ascii_uppercase))
def _UpperCAmelCase ( UpperCamelCase: str , UpperCamelCase: str ):
"""simple docstring"""
__lowerCAmelCase = len(UpperCamelCase )
__lowerCAmelCase = 0
while True:
if x == i:
__lowerCAmelCase = 0
if len(UpperCamelCase ) == len(UpperCamelCase ):
break
key += key[i]
i += 1
return key
def _UpperCAmelCase ( UpperCamelCase: str , UpperCamelCase: str ):
"""simple docstring"""
__lowerCAmelCase = ""
__lowerCAmelCase = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__lowerCAmelCase = (dicta[letter] - dicta[key_new[i]]) % 2_6
i += 1
cipher_text += dicta[x]
return cipher_text
def _UpperCAmelCase ( UpperCamelCase: str , UpperCamelCase: str ):
"""simple docstring"""
__lowerCAmelCase = ""
__lowerCAmelCase = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__lowerCAmelCase = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6
i += 1
or_txt += dicta[x]
return or_txt
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCAmelCase = "THE GERMAN ATTACK"
__lowerCAmelCase = "SECRET"
__lowerCAmelCase = generate_key(UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = cipher_text(UpperCamelCase , UpperCamelCase )
print(F"Encrypted Text = {s}" )
print(F"Original Text = {original_text(UpperCamelCase , UpperCamelCase )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
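# --- Hedged worked example (illustration; uses the de-obfuscated names the
# driver above already calls). With key "SECRET" cycled out to the 17-char
# message, encryption subtracts key letters mod 26 and decryption adds them
# back, so the round trip is exact:
#
#   generate_key("THE GERMAN ATTACK", "SECRET")
#   # -> "SECRETSECRETSECRE"
#   cipher_text("THE GERMAN ATTACK", "SECRETSECRETSECRE")
#   # -> "BDC PAYUWL JPAIYI"
#   original_text("BDC PAYUWL JPAIYI", "SECRETSECRETSECRE")
#   # -> "THE GERMAN ATTACK"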
| 611 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """mctct"""
def __init__( self , A_=8065 , A_=1536 , A_=36 , A_=6144 , A_=4 , A_=384 , A_=920 , A_=1e-5 , A_=0.3 , A_="relu" , A_=0.02 , A_=0.3 , A_=0.3 , A_=1 , A_=0 , A_=2 , A_=1 , A_=0.3 , A_=1 , A_=(7,) , A_=(3,) , A_=80 , A_=1 , A_=None , A_="sum" , A_=False , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = num_attention_heads
UpperCamelCase = attention_head_dim
UpperCamelCase = max_position_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = layerdrop
UpperCamelCase = hidden_act
UpperCamelCase = initializer_range
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = conv_glu_dim
UpperCamelCase = conv_dropout
UpperCamelCase = num_conv_layers
UpperCamelCase = input_feat_per_channel
UpperCamelCase = input_channels
UpperCamelCase = conv_channels
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# prevents config testing from failing when exporting to json
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
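# --- Hedged mini-example (illustration; `MCTCTConfig` is assumed to be the
# public name of the class above). The constructor validates that the conv
# stack is consistent before anything else can go wrong:
#
#   config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))   # ok
#   config = MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))   # raises
#   # ValueError: len(config.conv_kernel) must equal config.num_conv_layers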
| 3 | 0 |
'''simple docstring'''
from collections.abc import Callable
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
snake_case__ : Union[str, Any] = a
snake_case__ : Any = b
if function(__SCREAMING_SNAKE_CASE ) == 0: # one of a or b is a root of the function
return a
elif function(__SCREAMING_SNAKE_CASE ) == 0:
return b
elif (
function(__SCREAMING_SNAKE_CASE ) * function(__SCREAMING_SNAKE_CASE ) > 0
): # if neither endpoint is a root and the function has the same sign at both,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
snake_case__ : Optional[int] = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until the bracket |start - mid| drops below 10^-7
if function(__SCREAMING_SNAKE_CASE ) == 0:
return mid
elif function(__SCREAMING_SNAKE_CASE ) * function(__SCREAMING_SNAKE_CASE ) < 0:
snake_case__ : str = mid
else:
snake_case__ : int = mid
snake_case__ : Tuple = start + (end - start) / 2.0
return mid
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ) -> List[str]:
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
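# --- Hedged worked example (illustration; `bisection` and `f` are the
# de-obfuscated names the __main__ block above uses). f(x) = x**3 - 2x - 5
# has f(2) = -1 < 0 and f(3) = 16 > 0, so a root lies in [2, 3] and bisection
# halves that bracket until it is narrower than 1e-7:
#
#   bisection(f, 2, 3)  # -> 2.0945514...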
| 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCAmelCase : Tuple = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCAmelCase : Optional[int] = TaTokenizerFast
lowerCAmelCase : Any = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
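# --- Hedged mini-example (illustration). The _LazyModule wiring above means
# submodules are imported only on first attribute access, e.g.:
#
#   from transformers import MT5ForConditionalGeneration  # resolved lazily
#   model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")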
| 3 | 0 |
import os
__lowercase = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = 0
__UpperCamelCase :List[Any] = 0
while index < len(SCREAMING_SNAKE_CASE ) - 1:
__UpperCamelCase :Optional[int] = SYMBOLS[numerals[index]]
__UpperCamelCase :List[Any] = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[str] = ''''''
__UpperCamelCase :List[Any] = num // 1_000
numerals += m_count * "M"
num %= 1_000
__UpperCamelCase :Union[str, Any] = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
__UpperCamelCase :Tuple = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
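# --- Hedged worked example (illustration; names are the de-obfuscated
# originals, `parse_roman_numerals` / `generate_roman_numerals`). The Project
# Euler task measures how many characters the minimal form saves:
#
#   parse_roman_numerals("XXXXVIIII")  # -> 49
#   generate_roman_numerals(49)        # -> "XLIX", saving 9 - 4 = 5 chars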
def lowerCamelCase ( SCREAMING_SNAKE_CASE = "/p089_roman.txt" ):
'''simple docstring'''
__UpperCamelCase :List[str] = 0
with open(os.path.dirname(SCREAMING_SNAKE_CASE ) + roman_numerals_filename ) as filea:
__UpperCamelCase :str = filea.readlines()
for line in lines:
__UpperCamelCase :Optional[int] = line.strip()
__UpperCamelCase :int = parse_roman_numerals(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = generate_roman_numerals(SCREAMING_SNAKE_CASE )
savings += len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE )
return savings
if __name__ == "__main__":
print(F'{solution() = }')
| 167 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , )-> Dict:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase):
lowerCAmelCase_ = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@is_flaky()
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' ,[None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' ,['''default''', 0, 100 * 2**20, 900 * 2**20] )
def UpperCamelCase( __UpperCamelCase : str ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config ,'''IN_MEMORY_MAX_SIZE''' ,__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowerCAmelCase_ : Any = dataset_size < in_memory_max_size
else:
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Any = is_small_dataset(__UpperCamelCase )
assert result == expected
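# --- Hedged mini-example (illustration). `is_small_dataset` just compares a
# byte size against `datasets.config.IN_MEMORY_MAX_SIZE`, and the default
# limit of 0 disables in-memory loading entirely:
#
#   is_small_dataset(400 * 2**20)  # False while IN_MEMORY_MAX_SIZE == 0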
| 171 |
'''simple docstring'''
def A_( A : list[int]):
UpperCamelCase = []
if len(A) == 1:
return [nums.copy()]
for _ in range(len(A)):
UpperCamelCase = nums.pop(0)
UpperCamelCase = permute(A)
for perm in permutations:
perm.append(A)
result.extend(A)
nums.append(A)
return result
def A_( A : str):
def backtrack(A : str):
if start == len(A) - 1:
output.append(nums[:])
else:
for i in range(A , len(A)):
UpperCamelCase , UpperCamelCase = nums[i], nums[start]
backtrack(start + 1)
UpperCamelCase , UpperCamelCase = nums[i], nums[start] # backtrack
UpperCamelCase = []
backtrack(0)
return output
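# --- Hedged worked example (illustration; `permute2` is the de-obfuscated
# name of the backtracking version above). Both functions return all n!
# orderings; the in-place swap version yields them in this order:
#
#   permute2([1, 2, 3])
#   # -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]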
if __name__ == "__main__":
import doctest
# use res to print the permutations returned by the permute2 function
lowerCAmelCase : Dict = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 3 | 0 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
for i in table:
res += inp[i - 1]
return res
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return data[1:] + data[0]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
SCREAMING_SNAKE_CASE_ : Any = ''
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
SCREAMING_SNAKE_CASE_ : str = int('0b' + data[0] + data[-1] , 2 )
SCREAMING_SNAKE_CASE_ : List[Any] = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Any = message[:4]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = message[4:]
SCREAMING_SNAKE_CASE_ : Optional[Any] = apply_table(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = xor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = apply_sbox(SCREAMING_SNAKE_CASE , temp[:4] ) # noqa: E741
SCREAMING_SNAKE_CASE_ : Any = apply_sbox(SCREAMING_SNAKE_CASE , temp[4:] )
SCREAMING_SNAKE_CASE_ : str = '0' * (2 - len(SCREAMING_SNAKE_CASE )) + l # noqa: E741
SCREAMING_SNAKE_CASE_ : Optional[int] = '0' * (2 - len(SCREAMING_SNAKE_CASE )) + r
SCREAMING_SNAKE_CASE_ : Optional[int] = apply_table(l + r , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = xor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return temp + right
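# --- Hedged mini-example (illustration; `apply_table` / `left_shift` are the
# de-obfuscated helper names the key-schedule driver below calls).
# `apply_table` reorders bits by a 1-indexed permutation table and
# `left_shift` rotates a bit-string one position:
#
#   apply_table("10100000", [2, 6, 3, 1, 4, 8, 5, 7])  # -> "00110000"
#   left_shift("10100")                                # -> "01001"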
if __name__ == "__main__":
lowerCAmelCase__: Dict = input("Enter 10 bit key: ")
lowerCAmelCase__: Tuple = input("Enter 8 bit message: ")
lowerCAmelCase__: Any = [6, 3, 7, 4, 8, 5, 10, 9]
lowerCAmelCase__: Any = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowerCAmelCase__: Optional[int] = [2, 4, 3, 1]
lowerCAmelCase__: List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
lowerCAmelCase__: Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
lowerCAmelCase__: Union[str, Any] = [4, 1, 2, 3, 2, 3, 4, 1]
lowerCAmelCase__: Tuple = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCAmelCase__: Tuple = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCAmelCase__: Optional[Any] = apply_table(key, paa_table)
lowerCAmelCase__: int = temp[:5]
lowerCAmelCase__: str = temp[5:]
lowerCAmelCase__: List[Any] = left_shift(left)
lowerCAmelCase__: List[str] = left_shift(right)
lowerCAmelCase__: Tuple = apply_table(left + right, pa_table)
lowerCAmelCase__: Optional[int] = left_shift(left)
lowerCAmelCase__: List[str] = left_shift(right)
lowerCAmelCase__: int = left_shift(left)
lowerCAmelCase__: str = left_shift(right)
lowerCAmelCase__: int = apply_table(left + right, pa_table)
# encryption
lowerCAmelCase__: Union[str, Any] = apply_table(message, IP)
lowerCAmelCase__: Dict = function(expansion, sa, sa, keya, temp)
lowerCAmelCase__: int = temp[4:] + temp[:4]
lowerCAmelCase__: str = function(expansion, sa, sa, keya, temp)
lowerCAmelCase__: Dict = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
lowerCAmelCase__: Tuple = apply_table(CT, IP)
lowerCAmelCase__: Optional[int] = function(expansion, sa, sa, keya, temp)
lowerCAmelCase__: List[Any] = temp[4:] + temp[:4]
lowerCAmelCase__: Dict = function(expansion, sa, sa, keya, temp)
lowerCAmelCase__: Dict = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 345 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def A_( A : float , A : float , A : int):
UpperCamelCase = x
UpperCamelCase = y
for step in range(A): # noqa: B007
UpperCamelCase = a * a - b * b + x
UpperCamelCase = 2 * a * b + y
UpperCamelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
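# --- Hedged worked check (illustration; `get_distance` is the de-obfuscated
# name of the escape-time helper above). The origin never diverges, so it
# scores 1.0; a point far outside the set escapes on the first iteration:
#
#   get_distance(0.0, 0.0, 50)  # -> 1.0 (49 / 49: every step stays bounded)
#   get_distance(2.0, 2.0, 50)  # -> 0.0 (|z|^2 > 4 after the first step)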
def A_( A : float):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def A_( A : float):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(A , 1 , 1))
def A_( A : int = 800 , A : int = 600 , A : float = -0.6 , A : float = 0 , A : float = 3.2 , A : int = 50 , A : bool = True , ):
UpperCamelCase = Image.new('RGB' , (image_width, image_height))
UpperCamelCase = img.load()
# loop through the image-coordinates
for image_x in range(A):
for image_y in range(A):
# determine the figure-coordinates based on the image-coordinates
UpperCamelCase = figure_width / image_width * image_height
UpperCamelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCamelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCamelCase = get_distance(A , A , A)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCamelCase = get_color_coded_rgb(A)
else:
UpperCamelCase = get_black_and_white_rgb(A)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase : Any = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 3 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> List[str]: # picklable for multiprocessing
return x.sum()
def __SCREAMING_SNAKE_CASE ( a__ : Union[str, Any] ) -> Union[str, Any]: # picklable for multiprocessing
return i + 1
@dataclass
class lowerCamelCase_ :
_lowercase : Optional[int] = 42
_lowercase : Union[str, Any] = 42
class lowerCamelCase_ ( snake_case_ ):
def lowerCAmelCase_ ( self : List[Any] ):
__A : int = {}
__A : Union[str, Any] = []
__A : Optional[Any] = 1
__A : List[Any] = [1, 2]
__A : Dict = {"""a""": 1, """b""": 2}
__A : Dict = {"""a""": [1, 2], """b""": [3, 4]}
__A : Optional[Any] = {"""a""": {"""1""": 1}, """b""": 2}
__A : Optional[Any] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__A : Tuple = {}
__A : Any = []
__A : Union[str, Any] = 2
__A : List[Any] = [2, 3]
__A : Optional[Any] = {"""a""": 2, """b""": 3}
__A : Dict = {"""a""": [2, 3], """b""": [4, 5]}
__A : List[Any] = {"""a""": {"""1""": 2}, """b""": 3}
__A : Union[str, Any] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
__A : Dict = 2
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
__A : Any = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
__A : Any = {"""a""": 2, """b""": 0, """c""": 2}
__A : List[Any] = {
"""a""": np.eye(2 ).astype(A_ ),
"""b""": np.zeros(3 ).astype(A_ ),
"""c""": np.ones(2 ).astype(A_ ),
}
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(A_ ): # can't pickle a local lambda
map_nested(lambda __A : x + 1 , A_ , num_proc=A_ )
def lowerCAmelCase_ ( self : Tuple ):
__A : Dict = {"""a""": 1, """b""": 2}
__A : int = {"""a""": 3, """b""": 4}
__A : int = {"""a""": 5, """b""": 6}
__A : List[str] = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
def lowerCAmelCase_ ( self : str ):
class lowerCamelCase_ :
_lowercase : List[str] = '''bar'''
__A : Union[str, Any] = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(A_ , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : str ,a__ : Optional[Any] ) -> Dict:
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
__A : List[Any] = {f"""{i}""": i for i in range(a__ )}
__A : List[Any] = map_nested(lambda a__ : x + 10 ,a__ ,num_proc=a__ ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class lowerCamelCase_ ( snake_case_ ):
@require_tf
def lowerCAmelCase_ ( self : Tuple ):
import tensorflow as tf
from tensorflow.keras import layers
__A : str = layers.Dense(2 )
def gen_random_output():
__A : int = tf.random.uniform((1, 3) )
return model(A_ ).numpy()
with temp_seed(42 , set_tensorflow=A_ ):
__A : Optional[int] = gen_random_output()
with temp_seed(42 , set_tensorflow=A_ ):
__A : List[Any] = gen_random_output()
__A : int = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def lowerCAmelCase_ ( self : List[Any] ):
import torch
def gen_random_output():
__A : Optional[int] = torch.nn.Linear(3 , 2 )
__A : List[str] = torch.rand(1 , 3 )
return model(A_ ).detach().numpy()
with temp_seed(42 , set_pytorch=A_ ):
__A : Union[str, Any] = gen_random_output()
with temp_seed(42 , set_pytorch=A_ ):
__A : Tuple = gen_random_output()
__A : Union[str, Any] = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def lowerCAmelCase_ ( self : List[str] ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
__A : List[Any] = gen_random_output()
with temp_seed(42 ):
__A : List[Any] = gen_random_output()
__A : Optional[Any] = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" ,[{}] )
def __SCREAMING_SNAKE_CASE ( a__ : Any ) -> Optional[Any]:
__A : int = NestedDataStructure(a__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
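# The parametrized cases above pin down the flattening semantics exactly.
# What follows is a minimal recursive sketch with the same behaviour, added
# for illustration only; `flatten_sketch` is a hypothetical name, not the
# actual `NestedDataStructure.flatten` implementation.
def flatten_sketch(data):
    # Dict values and list items are recursed into; everything else
    # (including strings) is treated as a leaf.
    if isinstance(data, dict):
        return [leaf for value in data.values() for leaf in flatten_sketch(value)]
    if isinstance(data, list):
        return [leaf for item in data for leaf in flatten_sketch(item)]
    return [data]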
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
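# The `_import_structure` / `_LazyModule` pattern above keeps
# `import transformers.models.falcon` cheap: the torch-backed module is only
# imported when one of its attributes is first accessed. Below is a
# stripped-down, generic sketch of the idea (not transformers' actual
# `_LazyModule`, whose implementation differs in detail):
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)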
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # Element-wise ReLU: negative entries are clamped to 0.
    return np.maximum(0, vector)
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
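# Because `np.maximum` broadcasts, the helper also works on arrays of any
# shape, e.g. (output shown approximately):
# >>> relu(np.array([[-2.0, 3.0], [0.5, -0.1]]))
# array([[0. , 3. ],
#        [0.5, 0. ]])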
'''simple docstring'''
lowerCAmelCase : Optional[Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
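# A quick sanity check for the two helpers, using the expected results noted
# in the __main__ block below plus the documented trivial cases:
def _check_bfs_helpers():
    assert bfs_shortest_path(demo_graph, 'G', 'D') == ['G', 'C', 'A', 'B', 'D']
    assert bfs_shortest_path_distance(demo_graph, 'G', 'D') == 4
    # start == goal is handled explicitly by both functions
    assert bfs_shortest_path(demo_graph, 'G', 'G') == ['G']
    assert bfs_shortest_path_distance(demo_graph, 'G', 'G') == 0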
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\"."
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''',
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens
        if 'max_length' in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == 'pt':
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == 'tf':
            input_ids = tf.constant([input_ids])
        return {'input_ids': input_ids, 'conversation': conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('max_length', self.model.config.max_length)

        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if 'attention_mask' in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation')
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {'output_ids': output_ids[:, start_position:], 'conversation': conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
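# A short, self-contained usage sketch for the `Conversation` container
# defined above (illustrative only; in practice the pipeline fills in the
# generated responses):
if __name__ == "__main__":
    conversation = Conversation("Hi, how are you?")
    # After generation, the pipeline marks the input processed and appends
    # the model's answer:
    conversation.mark_processed()
    conversation.append_response("I am fine, thanks.")
    conversation.add_user_input("Great to hear!")
    print(conversation)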
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Any = None
if self.use_input_mask:
__snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[Any] = None
if self.use_token_type_ids:
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Tuple = None
__snake_case : Optional[int] = None
__snake_case : Optional[Any] = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Dict = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : int ) -> Dict:
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , use_stable_embedding=A_ , )
def A_ ( self : Any , __a : Any , __a : List[Any] , __a : Union[str, Any] , __a : str , __a : Optional[Any] , __a : str , __a : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case : Dict = OpenLlamaModel(config=A_ )
model.to(A_ )
model.eval()
__snake_case : Union[str, Any] = model(A_ , attention_mask=A_ )
__snake_case : Union[str, Any] = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Union[str, Any] , __a : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[Any] , __a : List[Any] , __a : List[Any] , __a : str , __a : List[Any] , ) -> Tuple:
'''simple docstring'''
__snake_case : List[str] = True
__snake_case : Tuple = OpenLlamaModel(A_ )
model.to(A_ )
model.eval()
__snake_case : Tuple = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
__snake_case : str = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
__snake_case : Optional[int] = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Any , __a : str , __a : Optional[int] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] , __a : str , __a : str , ) -> str:
'''simple docstring'''
__snake_case : List[Any] = OpenLlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
__snake_case : int = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : str , __a : Union[str, Any] , __a : int , __a : int , __a : int , __a : str , __a : Tuple , __a : int , __a : str , __a : int , ) -> Any:
'''simple docstring'''
__snake_case : Union[str, Any] = True
__snake_case : List[str] = True
__snake_case : Any = OpenLlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
__snake_case : List[str] = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
__snake_case : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__snake_case : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : Dict = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0]
__snake_case : Optional[int] = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0]
# select random slice
__snake_case : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
def A_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def A_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Dict = type
self.model_tester.create_and_check_model(*A_ )
def A_ ( self : Dict ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = 3
__snake_case : int = input_dict['input_ids']
__snake_case : int = input_ids.ne(1 ).to(A_ )
__snake_case : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : str = OpenLlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__snake_case : str = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = 3
__snake_case : Optional[Any] = 'single_label_classification'
__snake_case : Dict = input_dict['input_ids']
__snake_case : Optional[Any] = input_ids.ne(1 ).to(A_ )
__snake_case : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : Any = OpenLlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__snake_case : Optional[int] = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = 3
__snake_case : Tuple = 'multi_label_classification'
__snake_case : Optional[int] = input_dict['input_ids']
__snake_case : str = input_ids.ne(1 ).to(A_ )
__snake_case : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case : Dict = OpenLlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__snake_case : Union[str, Any] = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def A_ ( self : Optional[int] , __a : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = ids_tensor([1, 10] , config.vocab_size )
__snake_case : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Optional[Any] = OpenLlamaModel(A_ )
original_model.to(A_ )
original_model.eval()
__snake_case : List[str] = original_model(A_ ).last_hidden_state
__snake_case : Union[str, Any] = original_model(A_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Any = {'type': scaling_type, 'factor': 1_0.0}
__snake_case : Any = OpenLlamaModel(A_ )
scaled_model.to(A_ )
scaled_model.eval()
__snake_case : Dict = scaled_model(A_ ).last_hidden_state
__snake_case : Union[str, Any] = scaled_model(A_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.'
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
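# With the default strides above, the feature encoder downsamples raw audio
# by 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320, which is exactly what
# `inputs_to_logits_ratio` computes. A small illustrative check using the
# defaults:
if __name__ == "__main__":
    config = UniSpeechSatConfig()
    assert config.inputs_to_logits_ratio == 320  # 16 kHz input -> 50 logit frames/sec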
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]

__version__ = '3.0.12'


_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    # Raised when the lock could not be acquired within *timeout* seconds.
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
                        self._acquire()

                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...'
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    # Uses msvcrt.locking() to hard-lock the lock file on Windows systems.
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    # Uses fcntl.flock() to hard-lock the lock file on Unix systems.
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    # Simply watches the existence of the lock file.
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
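# A minimal usage sketch for the platform-appropriate `FileLock` selected
# above; if the lock cannot be acquired within the timeout, the `Timeout`
# exception defined earlier is raised:
if __name__ == "__main__":
    lock = FileLock("demo.txt.lock", timeout=1)
    try:
        with lock:
            # At most one process holding "demo.txt.lock" gets here at a time.
            pass
    except Timeout:
        print("Another instance currently holds the lock.")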
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = BeitModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.num_labels
UpperCamelCase = BeitForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A_ ), BeitForMaskedImageModeling]:
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase = False
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(A_ )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=A_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BeitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).pixel_values.to(A_ )
# prepare bool_masked_pos
UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(pixel_values=A_ , bool_masked_pos=A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(A_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1e-2 ) )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 21841) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
UpperCamelCase = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=A_ , )
else:
UpperCamelCase = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits.detach().cpu()
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(500, 300)] )
UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , A_ )
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ )
UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , A_ )
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
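# A quick illustrative instantiation of the config above; the arguments shown
# are simply the defaults and could be omitted:
if __name__ == "__main__":
    config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
    print(config.model_type)  # efficientformer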
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase : Dict = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """generated"""
def __init__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
super().__init__(*A_ , **A_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['truncation'] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['return_type'] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.'
                )
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input')
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`'''
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwarg
        if 'token_type_ids' in inputs:
            del inputs['token_type_ids']
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == 'pt':
            in_b, input_length = model_inputs['input_ids'].shape
        elif self.framework == 'tf':
            in_b, input_length = tf.shape(model_inputs['input_ids']).numpy()

        generate_kwargs['min_length'] = generate_kwargs.get('min_length', self.model.config.min_length)
        generate_kwargs['max_length'] = generate_kwargs.get('max_length', self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs['min_length'], generate_kwargs['max_length'])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == 'pt':
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == 'tf':
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {'output_ids': output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs['output_ids'][0]:
            if return_type == ReturnType.TENSORS:
                record = {F'''{self.return_name}_token_ids''': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    F'''{self.return_name}_text''': self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = 'summary'

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''')

        if input_length < max_length:
            logger.warning(
                F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
                'a summarization task, where outputs shorter than the input are typically wanted, you might '
                F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length // 2})'''
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = 'translation'

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
                'increasing your max_length manually, e.g. translator(\'...\', max_length=400)'
            )
        return True

    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, '_build_translation_inputs', None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params['src_lang'] = src_lang
        if tgt_lang is not None:
            preprocess_params['tgt_lang'] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('task', self.task)
            items = task.split('_')
            if task and len(items) == 4:
                # translation, XX, to, YY
                preprocess_params['src_lang'] = items[1]
                preprocess_params['tgt_lang'] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
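
# A short usage sketch for the pipelines above. The checkpoint names are
# illustrative assumptions; any compatible seq2seq checkpoint works.
if __name__ == "__main__":
    from transformers import pipeline

    summarizer = pipeline('summarization', model='sshleifer/distilbart-cnn-12-6')
    print(summarizer('Long article text ...', max_length=60, min_length=10))

    translator = pipeline('translation_en_to_fr', model='t5-small')
    print(translator('How old are you?'))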
| 3 | 0 |
from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """
    Change the brightness of a PIL Image by shifting every channel value by `level`.
    """

    def brightness(c: int) -> float:
        # Shift each channel value by `level`; the expression keeps the
        # upstream form, which simplifies to `c + level`.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
UpperCamelCase_ = change_brightness(img, 1_0_0)
brigt_img.save("image_data/lena_brightness.png", format="png")
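
# For comparison, a sketch using PIL's built-in multiplicative brightness
# control, `ImageEnhance.Brightness` (factor 1.0 leaves the image unchanged,
# < 1.0 darkens, > 1.0 brightens); the file paths mirror the example above.
if __name__ == "__main__":
    from PIL import ImageEnhance

    with Image.open("image_data/lena.jpg") as img:
        ImageEnhance.Brightness(img).enhance(1.4).save("image_data/lena_enhance.png", format="png")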
| 611 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'a': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'a': 2, 'c': 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
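
# Sketch only: the same handler pattern works for Accelerate's other kwargs
# dataclasses. Assuming a release that exposes `InitProcessGroupKwargs`, the
# process-group init timeout can be raised like so.
def _init_process_group_example():
    from datetime import timedelta

    from accelerate import InitProcessGroupKwargs

    handler = InitProcessGroupKwargs(timeout=timedelta(seconds=1800))
    return Accelerator(kwargs_handlers=[handler])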
if __name__ == "__main__":
lowerCAmelCase : Tuple = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : List[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : int = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : Dict = ''
lowerCAmelCase : Dict = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 3 | 0 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
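
    # A quick usage sketch (values per the table above):
    # 1 kWh in megajoules: 3_600_000 J / 1_000_000 J = 3.6
    assert energy_conversion("kilowatthour", "megajoule", 1) == 3.6
    assert energy_conversion("joule", "joule", 123.0) == 123.0  # identity round-trip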
| 270 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier-free sampling.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae, text_encoder, tokenizer, transformer, scheduler,
                 learned_classifier_free_sampling_embeddings):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt'
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}'''
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt'
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt)}''')

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps)}.'''
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).'''
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == 'pil':
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that, per column vector, the lowest probabilities whose cumulative probability
        exceeds `truncation_rate` are zeroed out (set to `-inf` in log space).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # Map the mask back to the original (unsorted) order
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
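
# A minimal end-to-end sketch for the pipeline above, assuming the publicly
# released VQ-Diffusion ITHQ checkpoint and a diffusers install that exports
# `VQDiffusionPipeline`:
if __name__ == "__main__":
    from diffusers import VQDiffusionPipeline

    pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

    image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]
    image.save("teddy_bear.png")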
| 3 | 0 |