| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
def triangle_number_generator():
    """Yield the triangle numbers T(n) = n * (n + 1) / 2 for n = 1 .. 999_999."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # whatever remains is a prime factor with multiplicity 1
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
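# Usage sketch: running this module prints the answer to the classic
# Project Euler #12 threshold of 500 divisors, which is 76576500.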
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
        pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
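# Usage note (the path is an assumption based on the usual transformers layout):
#   python -m pytest tests/models/levit/test_modeling_levit.py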
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
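# A minimal usage sketch (hedged: kwargs mirror the tester defaults above):
#   processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
#   pixel_values = processor(images=Image.new("RGB", (32, 32)), return_tensors="pt").pixel_values
#   # expected shape after resize + center crop: (1, 3, 18, 18)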
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
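# Minimal usage sketch (hedged: assumes a concrete subclass that defines
# model_input_names = ["input_values"], as the speech feature extractors do;
# MySpeechFeatureExtractor is a hypothetical name):
#   extractor = MySpeechFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding=True, return_tensors="np")
#   # batch["input_values"] should have shape (2, 3), the short row padded with 0.0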
def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val when option is True, otherwise max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find to_guess by repeatedly bisecting the [lower, higher] range."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument values for lower and higher must satisfy lower < higher")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
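# Non-interactive usage sketch (bypassing the input() prompts in main()):
#   guess_the_number(10, 1000, 17)
# prints the bisection trail and ends with "guess the number : 17".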
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
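# Usage sketch: ViTMSNConfig() reproduces the vit-msn-base defaults above, and
# individual fields can be overridden, e.g. ViTMSNConfig(image_size=384).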
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> "qiskit.result.counts.Counts":
    """Apply X (NOT) gates to two qubits and measure them on the Aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        # fmt: off
        expected_encoding = {
            "input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
            "attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
@property
def __magic_name__ ( self : Any ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Tuple = ort.SessionOptions()
UpperCAmelCase : int = False
return options
def __magic_name__ ( self : Tuple ):
# using the PNDM scheduler by default
UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''onnx''', safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Union[str, Any] = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
UpperCAmelCase : Any = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=1_0, output_type='''np''' )
UpperCAmelCase : Tuple = output.images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Union[str, Any] = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', subfolder='''scheduler''', revision='''onnx''' )
UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', revision='''onnx''', scheduler=__A, safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Any = '''open neural network exchange'''
UpperCAmelCase : Optional[int] = np.random.RandomState(0 )
UpperCAmelCase : str = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=1_0, generator=__A, output_type='''np''' )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase : Optional[Any] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__ ( self : int ):
UpperCAmelCase : Tuple = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', subfolder='''scheduler''', revision='''onnx''' )
UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', revision='''onnx''', scheduler=__A, safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Optional[Any] = '''open neural network exchange'''
UpperCAmelCase : Union[str, Any] = np.random.RandomState(0 )
UpperCAmelCase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=1_0, generator=__A, output_type='''np''' )
UpperCAmelCase : Union[str, Any] = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase : Optional[int] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = 0
def test_callback_fn(__A : int, __A : int, __A : np.ndarray ) -> None:
UpperCAmelCase : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
UpperCAmelCase : Optional[Any] = latents[0, -3:, -3:, -1]
UpperCAmelCase : Optional[int] = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
UpperCAmelCase : List[str] = latents[0, -3:, -3:, -1]
UpperCAmelCase : Dict = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', revision='''onnx''', safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Union[str, Any] = '''Andromeda galaxy in a bottle'''
UpperCAmelCase : List[str] = np.random.RandomState(0 )
pipe(
prompt=__A, num_inference_steps=5, guidance_scale=7.5, generator=__A, callback=__A, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', revision='''onnx''', safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(__A, __A )
assert pipe.safety_checker is None
UpperCAmelCase : int = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
UpperCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(__A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase : Optional[int] = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
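# A minimal usage sketch of the pipeline class exercised above (checkpoint,
# revision and inputs mirror the tests; running on the CPU provider is an
# assumption for illustration, not something the nightly suite does):
#
#   pipe = OnnxStableDiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
#   )
#   image = pipe("A painting of a squirrel eating a burger", num_inference_steps=10).images[0]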
def interpolation_search(sorted_collection, item):
    """Searches for ``item`` in an ascending ``sorted_collection`` and returns
    its index, or ``None`` if the item is not present."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None
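# Worked example of the probe formula above, using the demo collection from the
# __main__ block below (a sketch, not part of the algorithm):
#
#   searching for 67 in [10, 30, 40, 45, 50, 66, 77, 93] with left=0, right=7:
#       point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4
#   sorted_collection[4] == 50 < 67, so the window narrows to left=5, right=7
#   and the next probe lands near the high end of the list.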
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive implementation of interpolation search.

    The first call must pass the full range, i.e. ``left=0`` and
    ``right=len(sorted_collection) - 1``.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raises ``ValueError`` if ``collection`` is not sorted in ascending order."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
"""Export a BART model plus beam search to ONNX and validate it against PyTorch."""
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
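# Example invocation (a sketch; the script filename and output path are only
# illustrative, and only --model_name_or_path is required):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --max_length 5 --num_beams 4 --device cpu \
#       --output_file_path BART.onnx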
"""Find all solutions to the N queens problem with a depth-first search over
column placements."""
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # We apply what we learned previously. First we check that in the current
        # board (possible_board) there is no queen in the same column, because if
        # there is it means there is a vertical collision. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop (see the worked example after
        # this function).
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If all checks pass we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
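# Worked example of the collision bookkeeping above (a sketch): with queens at
# (row=0, col=1) and (row=1, col=3) the recursive call receives
#     possible_board = [1, 3]
#     diagonal_right_collisions = [0 - 1, 1 - 3] = [-1, -2]
#     diagonal_left_collisions  = [0 + 1, 1 + 3] = [1, 4]
# so candidate (row=2, col=2) is rejected because 2 + 2 == 4 collides with the
# queen at (1, 3), while (row=2, col=0) passes all three checks.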
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"vocab_file": "spiece.model"}
__a = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
__a = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
__a = "▁"
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : List[Any] = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: List[Any] , snake_case: Union[str, Any] , snake_case: Tuple=True , snake_case: Tuple=True , snake_case: List[str]=False , snake_case: str="[CLS]" , snake_case: Union[str, Any]="[SEP]" , snake_case: Union[str, Any]="<unk>" , snake_case: int="[SEP]" , snake_case: Optional[Any]="<pad>" , snake_case: Dict="[CLS]" , snake_case: Any="[MASK]" , snake_case: Optional[Dict[str, Any]] = None , **snake_case: int , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
snake_case_ :int = (
AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case , normalized=snake_case )
if isinstance(snake_case , snake_case )
else mask_token
)
snake_case_ :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
snake_case_ :Any = do_lower_case
snake_case_ :Optional[int] = remove_space
snake_case_ :Optional[Any] = keep_accents
snake_case_ :List[str] = vocab_file
snake_case_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def lowerCAmelCase_ ( self: int ) -> str:
return len(self.sp_model )
def lowerCAmelCase_ ( self: Tuple ) -> Optional[Any]:
snake_case_ :Optional[int] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: str ) -> List[str]:
snake_case_ :List[str] = self.__dict__.copy()
snake_case_ :Optional[Any] = None
return state
def __setstate__( self: Optional[int] , snake_case: Union[str, Any] ) -> Dict:
snake_case_ :List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ :str = {}
snake_case_ :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self: Tuple , snake_case: List[str] ) -> List[str]:
if self.remove_space:
snake_case_ :str = """ """.join(inputs.strip().split() )
else:
snake_case_ :List[str] = inputs
snake_case_ :Union[str, Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ :Optional[Any] = unicodedata.normalize("""NFKD""" , snake_case )
snake_case_ :Tuple = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
snake_case_ :Optional[Any] = outputs.lower()
return outputs
def lowerCAmelCase_ ( self: List[str] , snake_case: str ) -> List[str]:
snake_case_ :Tuple = self.preprocess_text(snake_case )
snake_case_ :str = self.sp_model.encode(snake_case , out_type=snake_case )
snake_case_ :List[str] = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ :Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ :List[Any] = cur_pieces[1:]
else:
snake_case_ :Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def lowerCAmelCase_ ( self: Optional[int] , snake_case: Union[str, Any] ) -> Any:
return self.sp_model.PieceToId(snake_case )
def lowerCAmelCase_ ( self: Tuple , snake_case: Dict ) -> str:
return self.sp_model.IdToPiece(snake_case )
def lowerCAmelCase_ ( self: List[str] , snake_case: Dict ) -> Optional[int]:
snake_case_ :str = []
snake_case_ :Tuple = """"""
snake_case_ :Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
snake_case_ :Dict = True
snake_case_ :Dict = []
else:
current_sub_tokens.append(snake_case )
snake_case_ :Dict = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :int = [self.sep_token_id]
snake_case_ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[int] , snake_case: Optional[List[int]] = None , snake_case: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1]
def lowerCAmelCase_ ( self: Any , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :Tuple = [self.sep_token_id]
snake_case_ :Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self: Dict , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ :Any = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
snake_case_ :Dict = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
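# Minimal usage sketch (assumes the "albert-base-v2" checkpoint is reachable on
# the Hub or cached locally):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tokenizer("ALBERT uses a SentencePiece vocabulary.")["input_ids"]
#   tokenizer.convert_ids_to_tokens(ids)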
import collections
import inspect
import unittest

from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
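# To run just this module (a sketch; the path assumes the usual transformers
# repository layout, and RUN_SLOW gates the integration test above):
#
#   RUN_SLOW=1 python -m pytest tests/models/swinv2/test_modeling_swinv2.py -v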
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
        """

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
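# Usage sketch (assumes the discriminator checkpoint is cached or reachable):
#
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tok("ELECTRA replaces tokens instead of masking them.")["input_ids"]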
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer with NMT, NFKC and lower-casing
    normalization and digit/punctuation pre-tokenization."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # the Unigram model needs to know which vocabulary id to fall back to
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
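# Training sketch (the corpus path is illustrative):
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(["data/corpus.txt"], vocab_size=8000)
#   tokenizer.encode("A test sentence.").tokens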
"""
Project Euler Problem 89: https://projecteuler.net/problem=89

Roman numerals: compute the number of characters saved by writing each numeral
in the input file in its minimal form.
"""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of roman numerals to an integer, e.g. "XLIX" -> 49."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generates the minimal-form roman numeral for an integer, e.g. 49 -> "XLIX"."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Returns the total number of characters saved by rewriting the numerals in
    the given file in their minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
"""Convert an original (CompVis-style) unconditional LDM checkpoint into a
diffusers LDMPipeline."""
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
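# Example invocation (a sketch; the script filename and the paths are
# placeholders):
#
#   python conversion_ldm_uncond.py \
#       --checkpoint_path model.ckpt \
#       --config_path config.yaml \
#       --output_path ./ldm-pipeline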
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
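
# How this pattern behaves (explanatory note, added for illustration): at import
# time only the cheap `_import_structure` dict is built and the module object is
# swapped for a `_LazyModule`, so a statement such as
# `from transformers.models.focalnet import FocalNetModel` triggers the heavy
# torch-dependent import only on first attribute access.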
| 217 | 1 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between any two units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
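
# Hedged worked examples (added for illustration; every factor pair above
# routes the value through cubic meters as the pivot unit):
#   volume_conversion(4, "cubicmeter", "litre")  -> 4 * 1 * 1000        = 4000.0
#   volume_conversion(1, "litre", "gallon")      -> 1 * 0.001 * 264.172 = 0.264172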
| 154 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
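
# Hedged usage note (not part of the test file): the @slow-decorated tests above
# are skipped by default; in the transformers test suite they are enabled with
# the RUN_SLOW environment variable, e.g.
#   RUN_SLOW=1 pytest tests/models/flaubert -k Flaubert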
| 154 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name: str):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
_lowerCamelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
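
# Hedged usage sketch (the script name and dump folder are placeholders):
#   python convert_detr_to_pytorch.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50
# The script downloads the original weights from torch hub, so the first run
# needs network access.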
| 177 |
| 177 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin):
    """Wraps an auto image processor and an auto tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 106 |
def topological_sort(graph):
    """Perform Kahn's algorithm on the adjacency list ``graph``."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
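
# Hedged walkthrough (added for illustration): for the adjacency list above,
# Kahn's algorithm pops zero in-degree vertices in FIFO order, printing
# [0, 1, 2, 3, 4, 5]; a cyclic input such as {0: [1], 1: [0]} prints
# "Cycle exists" because cnt never reaches len(graph).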
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho) for bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
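
# Hedged worked example (textbook approximations, not from this module): water
# at roughly 20 degrees C has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa,
# so speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~= 1467.8 m/s.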
| 36 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
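
# Hedged note (added for context, based on how the helper appears to be used):
# offline() simulates the three failure modes by patching the request layer,
# so the two integration-marked tests above should fail fast rather than hit
# the real network.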
| 104 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`, returning the random draws so runs can be compared"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y = a * x + b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase__ , automatic_checkpoint_naming=UpperCamelCase__ )
# Train baseline
UpperCamelCase = Accelerator(project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states: with total_limit=2 only the last two may survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = "/tmp/accelerate/state_checkpointing"
_lowerCamelCase : Union[str, Any] = DummyModel()
_lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_lowerCamelCase : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCamelCase ,_lowerCamelCase : Tuple = dummy_dataloaders()
_lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCamelCase ,_lowerCamelCase : Tuple = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCamelCase : Any = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_lowerCamelCase : Tuple = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
_lowerCamelCase : Optional[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
_lowerCamelCase : Dict = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
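# A minimal sketch of the save/resume pattern exercised above (illustrative only;
# the "runs" directory is a placeholder):
#
#     accelerator = Accelerator(
#         project_dir="runs", project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
#     )
#     model = DummyModel()
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#     model, optimizer = accelerator.prepare(model, optimizer)
#     accelerator.save_state()                                  # -> runs/checkpoints/checkpoint_0
#     accelerator.load_state("runs/checkpoints/checkpoint_0")   # restores model/optimizer/RNG state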
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
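# try_key relies on XOR being its own inverse; a quick round trip (values made up):
#
#     >>> key, plain = ord("a"), ord("H")
#     >>> chr((plain ^ key) ^ key)
#     'H'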
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE_ ( snake_case : str = "p059_cipher.txt" )-> int:
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' )
_lowerCamelCase = [int(snake_case ) for number in data.strip().split(',' )]
_lowerCamelCase = filter_valid_chars(snake_case )
for common_word in COMMON_WORDS:
_lowerCamelCase = filter_common_word(snake_case , snake_case )
if len(snake_case ) == 1:
break
_lowerCamelCase = possibles[0]
return sum(ord(snake_case ) for char in decoded_text )
if __name__ == "__main__":
print(f'{solution() = }')
| 80 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
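# Hypothetical usage sketch; "facebook/detr-resnet-50" is a real DETR checkpoint but
# any object-detection model works, and the image path is a placeholder:
#
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("cats.png", threshold=0.9)
#     # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}]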
| 80 | 1 |
"""simple docstring"""
def catalan_number(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
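# The loop implements the recurrence C(i) = C(i - 1) * (4i - 2) / (i + 1), so:
#
#     >>> [catalan_number(n) for n in range(1, 6)]
#     [1, 1, 2, 5, 14]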
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path) -> None:
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
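# Example invocation (the script filename and all paths are placeholders):
#
#     python convert_gpt2_checkpoint.py \
#         --gpt2_checkpoint_path /path/to/tf_checkpoint \
#         --pytorch_dump_folder_path /path/to/output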
| 217 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer. Based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
A_ : List[str] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
A_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : Dict = vocab_file
A_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
A_ : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
A_ : Optional[int] = len(self.sp_model ) - 1
A_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
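# Hypothetical usage sketch (the checkpoint is one of those listed in the maps above):
#
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Transformers est génial !")["input_ids"]
#     tokenizer.decode(ids)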
| 362 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
| 164 | 0 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
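# Each pass alternates between (even, odd) and (odd, even) index pairs; n passes
# over n elements guarantee a sorted result, e.g.:
#
#     >>> odd_even_transposition([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]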
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 177 | """simple docstring"""
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
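# Worked example: in the grid [[1, 3], [2, 4]] the two monotone paths cost
# 1 + 3 + 4 = 8 and 1 + 2 + 4 = 7, so:
#
#     >>> min_path_sum([[1, 3], [2, 4]])
#     7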
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 | 1 |
def binary_count_setbits(a: int) -> int:
    """Return the number of set bits in a positive integer."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        # reject non-integer numeric input (reconstructed from the garbled isinstance check)
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """Configuration for 8-bit / 4-bit quantization with `bitsandbytes`."""

    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
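# Hypothetical usage sketch (the model name is a placeholder; the keyword
# arguments are the real fields validated above):
#
#     from transformers import AutoModelForCausalLM
#     quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#     model = AutoModelForCausalLM.from_pretrained("some-org/some-model", quantization_config=quant_config)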
| 105 | 1 |
def counting_sort(collection: list) -> list:
    """Pure implementation of counting sort algorithm in Python."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string: str) -> str:
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__A = input("Enter numbers separated by a comma:\n").strip()
__A = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 10 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(_lowerCamelCase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 36 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the class name below is reconstructed; the (256 / 224) shortest-edge
# rescale in `resize` matches LeViT's image processor.
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 110 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
"""simple docstring"""
UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
UpperCamelCase = init_image.resize((512, 512) )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
UpperCamelCase = torch.from_numpy(np.array(A_ ) ).float() / 255.0
UpperCamelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCamelCase = 'A robot, 4k photo'
UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase , UpperCamelCase = pipe_prior(
A_ , image=A_ , strength=0.85 , generator=A_ , negative_prompt='' , ).to_tuple()
UpperCamelCase = pipeline(
image=A_ , image_embeds=A_ , negative_image_embeds=A_ , hint=A_ , generator=A_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A_ , A_ )
| 110 | 1 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch.")
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, )
    return patches.unsqueeze(0)
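# Shape sanity check for torch_extract_patches (a worked example, not from the
# original source): a (3, 32, 64) image with 16x16 patches yields 2 rows x 4
# columns of patches, each flattened to 16 * 16 * 3 = 768 values, i.e. a result
# of shape (1, 2, 4, 768).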
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None, ) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb=True, do_normalize=True, patch_size=None, max_patches=2048, is_vqa=False, **kwargs, ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs):
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True, ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images, header_text=None, do_convert_rgb=None, do_normalize=None, max_patches=None, patch_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors)
        return encoded_outputs
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication for square matrices whose size is a power of 2."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # The seven Strassen products: 7 recursive multiplications instead of the naive 8.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # NOTE: this early return mirrors the reference implementation, which hands the
    # inputs back unchanged when both matrices are already square.
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
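    # A minimal sanity check (assumes numpy is installed; numpy is not needed by the
    # algorithm itself): Strassen's result should match plain matrix multiplication.
    # `expected` must be computed first because `strassen` pads, and therefore
    # mutates, its input lists.
    #
    #   import numpy as np
    #   expected = (np.array(matrix1) @ np.array(matrix2)).tolist()
    #   assert strassen(matrix1, matrix2) == expected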
| 80 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : str , __a : Optional[NestedDataStructureLike[PathLike]] = None , __a : Optional[NamedSplit] = None , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[int] = None , **__a : Tuple , ):
_a = path_or_paths
_a = split if split or isinstance(__a , __a ) else "train"
_a = features
_a = cache_dir
_a = keep_in_memory
_a = streaming
_a = num_proc
_a = kwargs
@abstractmethod
def UpperCamelCase__ ( self : List[str] ):
pass
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[int] = None , **__a : int , ):
_a = features
_a = cache_dir
_a = keep_in_memory
_a = streaming
_a = num_proc
_a = kwargs
@abstractmethod
def UpperCamelCase__ ( self : List[Any] ):
pass
| 352 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = '▁'
lowerCAmelCase_ : Optional[Any] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowerCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowerCAmelCase_ : List[str] = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
lowerCAmelCase_ : List[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCAmelCase_ : Union[str, Any] = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =MAX_MODEL_INPUT_SIZES
__a =['input_ids', 'attention_mask']
__a =[]
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any="<s>" , __a : List[str]="</s>" , __a : str="<pad>" , __a : List[str]="<unk>" , __a : Union[str, Any]=False , __a : Any=False , __a : List[str]=None , __a : Optional[int]=None , __a : Optional[Dict[str, Any]] = None , **__a : int , ):
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
_a = do_upper_case
_a = do_lower_case
_a = load_json(__a )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
_a = lang_codes
_a = LANGUAGES[lang_codes]
_a = [f'<lang:{lang}>' for lang in self.langs]
_a = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
_a = self.lang_tokens
_a = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_a = {}
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self : Optional[int] , __a : Any ):
_a = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def UpperCamelCase__ ( self : List[Any] , __a : str ):
_a = self.lang_code_to_id[tgt_lang]
_a = [lang_code_id]
def UpperCamelCase__ ( self : Dict , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self : str , __a : int ):
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase__ ( self : str , __a : List[str] ):
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_a = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_a = []
else:
current_sub_tokens.append(__a )
_a = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self : int , __a : Any , __a : int=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
_a = [1] * len(self.prefix_tokens )
_a = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; reload it in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
_a = Path(__a )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
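# Quick round-trip sketch for the helpers above (hedged: the file names are
# illustrative, and `load_spm` needs a real SentencePiece model file on disk):
#
#   save_json({"<s>": 0, "</s>": 1}, "vocab.json")
#   assert load_json("vocab.json") == {"<s>": 0, "</s>": 1}
#   sp = load_spm("sentencepiece.bpe.model", {})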
| 346 | 0 |
import math
def _UpperCamelCase ( lowercase__ = 100 ):
__SCREAMING_SNAKE_CASE : Dict = sum(i * i for i in range(1 , n + 1 ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 164 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NOTE: the reference script tests `if "fc2" and "experts" not in key:`, where the
        # string literal "fc2" is always truthy; the intended membership tests are spelled
        # out here.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
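# For reference, the sharded index written above follows the standard Transformers
# layout; a sketch with made-up values:
#
#   {
#     "metadata": {"total_size": 21491416064},
#     "weight_map": {
#       "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#       "shared.weight": "pytorch_model-00129-of-00129.bin"
#     }
#   }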
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 367 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 254 | 0 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
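# For reference, a mapping block before and after sorting (illustrative):
#
#   MODEL_MAPPING_NAMES = OrderedDict(
#       [
#           ("bert", "BertModel"),
#           ("albert", "AlbertModel"),   # unsorted: "albert" < "bert"
#       ]
#   )
#
# becomes, after `sort_auto_mapping`, ordered by the quoted identifier:
#
#   MODEL_MAPPING_NAMES = OrderedDict(
#       [
#           ("albert", "AlbertModel"),
#           ("bert", "BertModel"),
#       ]
#   )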
| 105 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
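# Usage sketch (hedged: mirrors the documented LDM super-resolution flow; the
# checkpoint name is illustrative):
#
#   import PIL.Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("input.png").convert("RGB")
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("output.png")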
| 105 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 137 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 137 | 1 |
lowerCAmelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    """Encodes `data` according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
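# Worked example of the padding arithmetic above: encoding b"a" yields the 8-bit
# stream "01100001"; 8 % 6 == 2, so four zero bits are appended to give
# "011000 010000", i.e. charset indices 24 and 16 ("Y" and "Q"), plus
# "=" * ((6 - 2) // 2) == "==". Hence base64_encode(b"a") == b"YQ==".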
def base64_decode(encoded_data: bytes) -> bytes:
    """Decodes base64-encoded data into the original bytes."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
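    # Cross-check against the standard library: both directions should agree with
    # `base64` for arbitrary byte strings.
    #
    #   import base64 as stdlib_b64
    #   payload = b"hello world"
    #   assert base64_encode(payload) == stdlib_b64.b64encode(payload)
    #   assert base64_decode(base64_encode(payload)) == payload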
| 110 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase = logging.getLogger(__name__)
lowerCAmelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
            '''or remove the --do_eval argument.''' )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )

    model.resize_token_embeddings(len(tokenizer ) )

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the '''
            '''--mlm flag (masked language modeling).''' )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )

    # Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}

        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )

        results.update(result )

    return results
def _mp_fn( index ):
    """simple docstring"""
    # For xla_spawn (TPUs): the spawn entry point just runs main()
    main()
if __name__ == "__main__":
main()
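# Illustrative invocation (script name, model checkpoint and data paths below are
# placeholders, not taken from this file):
#   python run_lm_script.py \
#       --model_name_or_path bert-base-uncased \
#       --mlm --whole_word_mask \
#       --train_data_file /path/to/train.txt \
#       --do_train --do_eval --eval_data_file /path/to/eval.txt \
#       --output_dir /tmp/lm_output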
| 110 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( input_str ):
    # bitmap keeps one bit per unicode code point seen so far
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
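# Illustrative checks (sample strings are assumptions, not from the original module):
# __SCREAMING_SNAKE_CASE("abcde") -> True   (all characters distinct)
# __SCREAMING_SNAKE_CASE("aba")   -> False  (the bit for "a" is already set at the second "a")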
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source , target ):
    return (abs(source - target ) / target) < 0.01
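# Quick illustration (values are assumptions): is_apercent_close(100.5, 100.0) is True
# (0.5% deviation), while is_apercent_close(102.0, 100.0) is False (2% deviation).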
@pytest.mark.integration
def test_test_command( dataset_loading_script_dir ):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir , '''README.md''' )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
                        '''num_bytes''': 2_351_563,
                        '''num_examples''': 10_000,
                    },
                    {
                        '''name''': '''validation''',
                        '''num_bytes''': 238_418,
                        '''num_examples''': 1_000,
                    },
                ] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos['''default'''] , key ), getattr(expected_dataset_infos['''default'''] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 74 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )

    def test_dpr_question_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )

    def test_dpr_reader_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_no_head( self ):
        model = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 9 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
            raise ValueError(_WARNING )

        if os.name == "nt":
            raise NotImplementedError("""This metric is currently not supported on Windows.""" )

        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )

            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + """\n""" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result["""completion_id"""], result) )

        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["""passed"""] for r in result]
            total.append(len(result ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )

        ks = k
        pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int , c: int , k: int ) -> float:
        # unbiased estimator: 1 - comb(n - c, k) / comb(n, k), computed as a product
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )

    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )

    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
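# Worked example (illustrative numbers, not from the original module): with n=5
# samples of which c=2 pass, pass@1 = 1 - (1 - 1/4) * (1 - 1/5) = 1 - 0.6 = 0.4,
# i.e. exactly c/n for k=1; pass@4 is 1.0 because n - c = 3 < 4 guarantees that
# any choice of 4 samples contains a passing one.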
| 346 | 0 |
def prefix_function( input_string ):
    # prefix_result[i] = length of the longest proper prefix of input_string[: i + 1]
    # that is also a suffix of it (the classic KMP failure function)
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix( input_string ):
    return max(prefix_function(input_string ) )
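# Illustrative trace (the sample string is an assumption, not from the original module):
# prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4], so
# longest_prefix("aabcdaabc") == 4 -- the border "aabc" is both a proper prefix and a suffix.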
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__snake_case : Any = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__snake_case : Tuple = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__snake_case : List[Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__snake_case : Any = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__snake_case : int = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__snake_case : int = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__snake_case : int = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__snake_case : Dict = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__snake_case : Any = BatchEncoding()
__snake_case : Tuple = input_ids
__snake_case : int = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        return self.image_processor.post_process(*args , **kwargs )

    def post_process_object_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_object_detection(*args , **kwargs )

    def post_process_image_guided_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
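# Minimal usage sketch (the checkpoint name and inputs are assumptions, not from this file):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
# The nested text list is padded per batch sample to the maximum number of queries, as above.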
| 134 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 96 |
'''simple docstring'''
_UpperCamelCase = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_UpperCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_UpperCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 254 | 0 |
import functools
from typing import Any
def A__ ( string , words ):
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('''the string should be a non-empty string''' )

    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('''the words should be a list of non-empty strings''' )

    # Build trie
    trie = {}
    word_keeper_key = '''WORD_KEEPER'''

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string )

    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True

        return False

    return is_breakable(0 )
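# Illustrative checks (sample inputs are assumptions, not from the original module):
# A__("applepenapple", ["apple", "pen"]) -> True  ("apple" + "pen" + "apple")
# A__("catsandog", ["cats", "dog", "sand", "and", "cat"]) -> False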
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes , edges )

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected ) == sorted(result )
| 257 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values


X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)


poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
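# Note: PolynomialFeatures(degree=4) expands a single feature x into
# [1, x, x^2, x^3, x^4], so the LinearRegression above effectively fits a
# quartic polynomial in the position level.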
def viz_polynomial():
    plt.scatter(X , y , color='red')
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X)) , color='blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 137 |
def merge_sort(collection):
    def merge(left , right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]) , merge_sort(collection[mid:]))
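# Quick illustration (sample input is an assumption, not from the original module):
# merge_sort([5, 2, 9, 1]) == [1, 2, 5, 9]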
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
| 137 | 1 |
def binary_recursive( decimal: int ) -> str:
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )


def main( number: str ) -> str:
    number = str(number ).strip()
    if not number:
        raise ValueError('No input value was provided' )
    negative = '-' if number.startswith('-' ) else ''
    number = number.lstrip('-' )
    if not number.isnumeric():
        raise ValueError('Input value is not an integer' )
    return F"""{negative}0b{binary_recursive(int(number ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 103 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase ):
    def _get_uniform_logits( self , batch_size , length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self ):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
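    # Intuition (illustrative numbers, not part of the original test): with T=0.5 the
    # logits [2, 1] become [4, 2]; softmax([2, 1]) ~ [0.73, 0.27] sharpens to
    # softmax([4, 2]) ~ [0.88, 0.12], which is the "peaks get higher" effect asserted above.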
    def test_top_k_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )

        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        expected_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist , expected_dist , atol=1e-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20) , vocab_size=20 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size )
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=20 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=20 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )

        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids , scores_comp , cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )

        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids , scores , cur_len ):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores

        # with processor list
        def run_processor_list(input_ids , scores , cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )

        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )

        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 103 | 1 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
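# For reference (derived from the parametrized cases below; not part of the original module):
# hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2") returns
# "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv",
# since quote() percent-encodes the blank in the file name.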
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url( repo_id , path , revision ):
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}' | 74 |
"""simple docstring"""
from string import ascii_uppercase
letter_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_letter = dict(enumerate(ascii_uppercase))
def generate_key( message: str , key: str ) -> str:
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text( message: str , key_new: str ) -> str:
    cipher = ''
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (letter_to_index[letter] - letter_to_index[key_new[i]]) % 26
            i += 1
            cipher += index_to_letter[x]
    return cipher
def original_text( cipher: str , key_new: str ) -> str:
    or_txt = ''
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (letter_to_index[letter] + letter_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_letter[x]
    return or_txt
def main():
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(F'Encrypted Text = {s}' )
    print(F'Original Text = {original_text(s , key_new )}' )
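# Round-trip sanity check (follows from the arithmetic above): encryption computes
# (m - k) % 26 and decryption computes (c + k + 26) % 26 = m, so
# original_text(cipher_text(message, key_new), key_new) recovers the message.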
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 74 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path: str , rembert_config_file: str , pytorch_dump_path: str ) -> None:
    """simple docstring"""
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('''Building PyTorch model from configuration: {}'''.format(str(config ) ) )
    model = RemBertModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
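# Example invocation (illustrative; all paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin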
| 6 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 6 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 323 |
"""Single-indeterminate polynomial arithmetic."""

from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
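# Minimal usage sketch (added for illustration; it only exercises the class above):
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # represents 1 + 2x + 3x^2
    print(p)                      # 3x^2 + 2x + 1
    print(p.evaluate(2))          # 17
    print(p.derivative())         # 6x + 2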
| 134 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Parameter paths below follow the SpeechT5HifiGan module layout
    # (conv_pre / upsampler / resblocks / conv_post).
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
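# Example invocation (illustrative; all paths are placeholders):
#   python convert_hifigan.py \
#       --checkpoint_path /path/to/generator_checkpoint.pt \
#       --stats_path /path/to/stats.npy \
#       --pytorch_dump_folder_path /path/to/output_dir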
| 197 | 0 | from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    # Disable gradient updates for every parameter of the given module.
    # (The original function names were lost; descriptive names are used here.)
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.'
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S')
    return timestamp
| 197 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/electra-small-generator''': 512,
    '''google/electra-base-generator''': 512,
    '''google/electra-large-generator''': 512,
    '''google/electra-small-discriminator''': 512,
    '''google/electra-base-discriminator''': 512,
    '''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
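# Typical usage (illustrative; the checkpoint name comes from the maps above):
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   encoded = tokenizer("Hello world", return_tensors="pt")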
| 174 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 257 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids) | 354 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :List[Any] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 240 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
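# Illustrative usage sketch (the `waveform` variable is a placeholder 1-D array):
#   extractor = SpeechT5FeatureExtractor()
#   features = extractor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#   # target side (log-mel labels): extractor(audio_target=waveform, sampling_rate=16000)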
| 103 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast REALM tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        r"""Encode a batch of text or text-pair candidates, padded to `max_length` so candidates can be stacked."""
        # Always use a fixed sequence length so every candidate can be stacked into one batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
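# Illustrative call (texts are placeholders; each inner list is one candidate set):
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates([["candidate 1", "candidate 2"]], max_length=10, return_tensors="pt")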
| 103 | 1 |
from math import pow, sqrt


def validate(*values: float) -> bool:
    """Return True when at least one value is supplied and every value is positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)."""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """Effusion rate of gas 1, given the rate of gas 2 and both molar masses."""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """Effusion rate of gas 2, given the rate of gas 1 and both molar masses."""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    """Molar mass of gas 1: M1 = M2 / (rate_1 / rate_2)^2."""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    """Molar mass of gas 2, computed here as (rate_1 / rate_2)^2 / molar_mass."""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
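# Quick demonstration (molar masses in g/mol are illustrative textbook values):
#   effusion_ratio(2.016, 32.00) -> ~3.9841
# i.e. hydrogen effuses roughly four times faster than oxygen.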
| 204 |
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` is not already used in the row, column, or 3x3 box of (row, column)."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking over the digits 1-9."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)

        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 204 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
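# Example invocation (a sketch; every path below is a placeholder, not a real file):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/ckpt \
#       --rembert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin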
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 6 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
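# For example (a quick sketch of the expected behaviour):
#   is_balanced("{[()]}")  -> True
#   is_balanced("{[(])}")  -> False  (']' closes '(' out of order)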
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
    main()
| 6 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class ExampleDifferenceTests(unittest.TestCase):
    """Tests that each `complete_*` example stays in sync with the `by_feature` scripts."""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """Tests a single `complete` example against all of the implemented `by_feature` scripts."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps epoch\n            --output_dir {self.tmpdir}\n            ".split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps 1\n            --output_dir {self.tmpdir}\n            ".split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}\n            ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}\n            ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"\n                examples/by_feature/tracking.py\n                --with_tracking\n                --project_dir {tmpdir}\n                ".split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 167 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
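# Quick check (hexagonal numbers h_n = n * (2n - 1), starting from n = 0):
#   hexagonal_numbers(5) -> [0, 1, 6, 15, 28]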
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 167 | 1 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, mask_token=mask_token, cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
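# Usage sketch (checkpoint name comes from the pretrained map above; output is indicative):
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tokenizer("Hello world").input_ids  # sentencepiece pieces wrapped in [CLS] ... [SEP]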
| 327 |
"""Compute the binomial coefficient C(n, r) using Pascal's rule in O(r) space."""
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
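# Quick check against the closed form C(n, r) = n! / (r! * (n - r)!):
#   binomial_coefficient(10, 5) -> 252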
print(binomial_coefficient(n=10, r=5))
| 197 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
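# Note: with the default conv_stride above, inputs_to_logits_ratio evaluates to
# 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320, i.e. one encoder frame per 320 raw audio samples.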
| 352 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
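# This builder is what backs `load_dataset("parquet", ...)`; a loading sketch
# (the file name is a placeholder):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "my_data.parquet"})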
| 209 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size,) + expected_shape[1:])

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size,) + expected_shape[1:])

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_shape = (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )
        self.assertEqual(encoded_images.shape, expected_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size,) + expected_shape[1:])
| 306 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
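# Instantiation sketch (the ONNX config constructor arguments follow the usual
# transformers OnnxConfig pattern and are an assumption here):
#
#   config = MobileNetV2Config(depth_multiplier=1.4)    # e.g. the 1.4x-wide 224px variant
#   onnx_config = MobileNetV2OnnxConfig(config, task="image-classification")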
| 240 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """
    Constructs a Speech2Text feature extractor that extracts log-mel filter-bank
    features from raw speech and applies utterance-level cepstral normalization.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Get mel-filter bank features using TorchAudio's Kaldi-compliant ``fbank``."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True, normalize_vars: Optional[bool] = True, padding_value: float = 0.0) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
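# Usage sketch (a random one-second waveform; any mono 16 kHz signal works the same way):
#
#   extractor = Speech2TextFeatureExtractor()
#   inputs = extractor(np.random.randn(16000), sampling_rate=16000, return_tensors="pt")
#   # inputs["input_features"] has shape (1, num_frames, 80): log-mel filter-bank features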
| 345 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 345 | 1 |
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
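# Caveat: the float cube root makes this check fragile; e.g. on most platforms
# 64 ** (1 / 3) evaluates to 3.9999999999999996, so perfect_cube(64) returns False.
# An integer round-and-verify variant avoids that (a sketch, not part of the original):
#
#   def perfect_cube_exact(n: int) -> bool:
#       c = round(n ** (1 / 3))
#       return c * c * c == n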
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 204 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is not normalized.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
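# Usage sketch (checkpoint name comes from the pretrained map above):
#
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   pair = tokenizer("first segment", "second segment")
#   # pair["token_type_ids"] marks the first segment with 0s and the second with 1s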
| 204 | 1 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal gradient of the ellipse 4x^2 + y^2 = 100 at (x, y) is y / (4x)
    normal_gradient = point_y / 4 / point_x
    # double-angle identities give sin/cos of twice the normal's angle
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
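# Geometry recap (Project Euler 144): the beam starts at (0.0, 10.1), first hits the
# ellipse 4x^2 + y^2 = 100 at (1.4, -9.6), and keeps reflecting until it escapes through
# the gap -0.01 <= x <= 0.01 at the top; `solution()` counts the internal reflections.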
if __name__ == "__main__":
print(f'{solution() = }')
| 46 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given as (magnitude, angle) into its x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether the net moment of the force system about the origin is (near) zero."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
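# For example, a 100 N force pointing straight down resolves to (approximately):
#   polar_force(100, -90) -> [~0.0, -100.0]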
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 46 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 167 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
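# A minimal sketch of the lazy-import pattern used above (this is an assumption
# about how `_LazyModule` behaves, not its actual implementation): attributes are
# resolved on first access, so importing the package stays cheap.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value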
| 167 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig( PretrainedConfig):
    model_type = '''altclip_text_model'''
    def __init__( self: Optional[Any] , vocab_size: str=25_00_02 , hidden_size: Optional[int]=10_24 , num_hidden_layers: str=24 , num_attention_heads: Any=16 , intermediate_size: str=40_96 , hidden_act: str="gelu" , hidden_dropout_prob: List[Any]=0.1 , attention_probs_dropout_prob: int=0.1 , max_position_embeddings: Optional[Any]=5_14 , type_vocab_size: List[str]=1 , initializer_range: List[Any]=0.02 , initializer_factor: str=0.02 , layer_norm_eps: str=1e-0_5 , pad_token_id: str=1 , bos_token_id: str=0 , eos_token_id: Dict=2 , position_embedding_type: List[str]="absolute" , use_cache: Tuple=True , project_dim: int=7_68 , **kwargs: Tuple , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig( PretrainedConfig):
    model_type = '''altclip_vision_model'''
    def __init__( self: Any , hidden_size: int=7_68 , intermediate_size: Any=30_72 , projection_dim: Optional[int]=5_12 , num_hidden_layers: Optional[Any]=12 , num_attention_heads: List[str]=12 , num_channels: Any=3 , image_size: Optional[Any]=2_24 , patch_size: Any=32 , hidden_act: Dict="quick_gelu" , layer_norm_eps: Tuple=1e-5 , attention_dropout: Union[str, Any]=0.0 , initializer_range: List[str]=0.02 , initializer_factor: List[Any]=1.0 , **kwargs: int , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls: Any , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs: str ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type" ) == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig( PretrainedConfig):
    model_type = '''altclip'''
    is_composition = True
    def __init__( self: Optional[int] , text_config: List[str]=None , vision_config: List[Any]=None , projection_dim: int=7_68 , logit_scale_init_value: str=2.65_92 , **kwargs: List[Any] ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict" , None )
        vision_config_dict = kwargs.pop("vision_config_dict" , None )
        super().__init__(**kwargs )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
                text_config = {}
# This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            F"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            F"The value `text_config_dict[\"{key}\"]` will be used instead."
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            F"value `text_config[\"{key}\"]` will be overriden."
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
                vision_config = {}
# This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key ): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            F"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            F"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            F"The value `vision_config[\"{key}\"]` will be overriden."
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
@classmethod
    def from_text_vision_configs( cls: List[Any] , text_config: AltCLIPTextConfig , vision_config: AltCLIPVisionConfig , **kwargs: Optional[int] ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self: str ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
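# Usage sketch for the composite config above (the hyperparameter values are
# arbitrary illustrations, not recommended settings):
text_config = AltCLIPTextConfig(hidden_size=10_24 , num_hidden_layers=24 )
vision_config = AltCLIPVisionConfig(hidden_size=7_68 , num_hidden_layers=12 )
config = AltCLIPConfig.from_text_vision_configs(text_config , vision_config )
assert config.to_dict()["model_type"] == "altclip"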
| 158 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt( TestCasePlus):
    def run_seqaseq_quick( self: int , distributed: int=False , extra_args_str: str=None , predict_with_generate: Dict=True , do_train: Dict=True , do_eval: Optional[int]=True , do_predict: Union[str, Any]=True , ):
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MBART_TINY , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"] , float )
            assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE ( self: str ):
        self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE ( self: Tuple ):
        self.run_seqaseq_quick(distributed=True )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE ( self: Dict ):
self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=_lowerCAmelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
        self.run_seqaseq_quick(
            distributed=True , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=False )
@require_apex
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica( self: Union[str, Any] , experiment_id: Any ):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data["extra_args_str"] )
        n_matches = len(re.findall(info_string , cl.err ) )
        self.assertEqual(n_matches , data["n_matches"] )
@slow
def SCREAMING_SNAKE_CASE ( self: List[str] ):
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def SCREAMING_SNAKE_CASE ( self: Tuple ):
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str ) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , "trainer_state.json" ) ).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig , loss_bnb , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
    def run_trainer( self: Dict , max_len: int , model_name: str , num_train_epochs: int , learning_rate: float = 3e-3 , optim: str = "adafactor" , distributed: bool = False , extra_args_str: str = None , eval_steps: int = 0 , predict_with_generate: bool = True , do_train: bool = True , do_eval: bool = True , do_predict: bool = True , n_gpus_to_use: int = None , ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs )}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps )}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()
        args_eval = F"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps )}\n        ".split()
        args_predict = "\n            --do_predict\n        ".split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F"\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys , "argv" , testargs ):
                main()
        return output_dir
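# A standalone sketch of the log-parsing pattern the tests above repeat: Trainer
# writes its history to trainer_state.json, and eval entries are the dicts that
# carry an "eval_loss" key. (Raw-JSON access is used here instead of TrainerState
# purely for illustration.)
import json

def eval_loss_history(output_dir: str) -> list:
    with open(os.path.join(output_dir, "trainer_state.json")) as f:
        state = json.load(f)
    return [log["eval_loss"] for log in state["log_history"] if "eval_loss" in log]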
| 158 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast( PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer
    def __init__( self :List[Any] , vocab_file :List[Any]=None , tokenizer_file :List[str]=None , do_lower_case :str=False , remove_space :Dict=True , keep_accents :Any=True , unk_token :Any="<unk>" , sep_token :str="[SEP]" , pad_token :Optional[int]="<pad>" , cls_token :Tuple="[CLS]" , mask_token :Tuple="[MASK]" , **kwargs :Optional[int] , ) -> Tuple:
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self :List[str] , token_ids_a :Tuple , token_ids_b :str = None ) -> Optional[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences( self :str , token_ids_a :int , token_ids_b :Union[str, Any] = None ) -> int:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self :List[str] , save_directory :Union[str, Any] , filename_prefix :Optional[int] = None ) -> Optional[int]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 232 |
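# Usage sketch for the FNet fast tokenizer defined above (assumes the
# google/fnet-base files referenced in the maps are downloadable). Sentence
# pairs come back as [CLS] A [SEP] B [SEP] with token_type_ids 0/1 per segment.
tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
encoded = tokenizer("A first sentence.", "A second sentence.")
print(encoded["input_ids"])
print(encoded["token_type_ids"])  # a run of 0s for segment A, then 1s for segment B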
def kinetic_energy(mass: float , velocity: float ) -> float:
    '''
    Calculate the kinetic energy 0.5 * m * |v|^2 of a body, in joules.
    '''
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
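# Worked example: a 10 kg body moving at 5 m/s carries 0.5 * 10 * 5**2 = 125 J,
# and the sign of the velocity is irrelevant because only |v| enters the formula:
#
#     >>> kinetic_energy(10, 5)
#     125.0
#     >>> kinetic_energy(10, -5)
#     125.0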
| 209 | 0 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param( t5x_checkpoint_path : Optional[Any] ):
    '''simple docstring'''
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params( flax_dict : Union[str, Any] ):
    '''simple docstring'''
    converted_dict = {}
    CONVERSION_MAPPING = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
    DECODER_CONVERSION_MAPPING = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '''.'''.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , new_key )
                new_key = new_key.replace('''encoder''' , '''encoder.encoder''' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path : str , pytorch_dump_folder_path : int , use_large : List[str]=False , is_vqa : str=False ):
    '''simple docstring'''
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = Pix2StructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = Pix2StructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 40_96
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('''Model saved in {}'''.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
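# Walking one made-up T5X-style key through the renaming steps above (the sample
# key is hypothetical, not taken from a real checkpoint):
import re as _re_demo

demo_key = "encoder.layers_0.attention.query.kernel"
demo_key = demo_key.replace("kernel" , "weight" )                     # CONVERSION_MAPPING step
demo_key = _re_demo.sub(R"layers_(\d+)" , R"layer.\1" , demo_key )    # layer-number rewrite
demo_key = demo_key.replace("encoder" , "encoder.encoder" )           # encoder prefix fixup
assert demo_key == "encoder.encoder.layer.0.attention.query.weight"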
| 91 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self : List[str] , image_processor : Tuple=None , tokenizer : List[str]=None , **kwargs : Optional[int] )-> Tuple:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : int , text : Dict=None , visual_prompt : Optional[int]=None , images : Union[str, Any]=None , return_tensors : int=None , **kwargs : Tuple )-> Optional[int]:
        """simple docstring"""
        if text is None and visual_prompt is None and images is None:
            raise ValueError('''You have to specify either text, visual prompt or images.''' )
        if text is not None and visual_prompt is not None:
            raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                '''pixel_values''': image_features.pixel_values,
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self : Optional[int] , *args : List[str] , **kwargs : Dict )-> Tuple:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : List[Any] , *args : str , **kwargs : List[Any] )-> Optional[Any]:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self : Any )-> Optional[int]:
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self : str )-> List[Any]:
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
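# Usage sketch for the processor above (it pairs a ViTImageProcessor with a CLIP
# tokenizer; the checkpoint name here is an assumption for illustration):
from PIL import Image

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']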
| 91 | 1 |
from collections import defaultdict
def check_anagrams( first_str: str , second_str: str ) -> bool:
    '''
    Check whether two strings are anagrams of each other,
    ignoring case and whitespace.
    '''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(""" """ , """""" )
    second_str = second_str.replace(""" """ , """""" )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int )
    # For each character position, increment the count for the character in the
    # first string and decrement it for the character in the second string
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()
    status = check_anagrams(input_a, input_b)
    print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
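# Quick doctest-style checks: anagram detection is case-insensitive and ignores
# spaces, so phrases qualify as well as single words:
#
#     >>> check_anagrams("Listen", "Silent")
#     True
#     >>> check_anagrams("rail safety", "fairy tales")
#     True
#     >>> check_anagrams("hello", "world")
#     False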
| 345 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = embedding_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : List[str] = scope
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self: Any ) -> Dict:
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,)
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int:
UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int:
UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int:
UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = model(
lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def A__ ( self: List[str] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : List[str] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
UpperCAmelCase_ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ )
return inputs_dict
    def setUp( self: List[str] ) -> Any:
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=MobileBertConfig ,hidden_size=37 )
def A__ ( self: Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def A__ ( self: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def A__ ( self: Optional[Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def A__ ( self: Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def _long_tensor( tok_lst : Union[str, Any] ):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self: List[Any] ) -> str:
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
[
[
[-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
[-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
[2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
]
        ] ,device=torch_device ,)
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
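# The ratio-based comparison above, in isolation: when values span several orders
# of magnitude, bounding expected / actual within [1 - tol, 1 + tol] is far more
# robust than an absolute difference. (Tensor values here are illustrative only.)
import torch

expected = torch.tensor([1.0e8, 3.9, -0.17])
actual = expected * 1.0001  # well inside the 1e-3 tolerance used above
ratio = expected / actual
assert bool(torch.all(ratio >= 1 - 1e-3) and torch.all(ratio <= 1 + 1e-3))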
| 345 | 1 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder( input_a: int = 1 , input_b: int = 1 , carry_in: int = 1 ) -> qiskit.result.counts.Counts:
    """
    Build a 4-qubit full-adder circuit for the three input bits (a value of 2
    puts the corresponding qubit into superposition) and return the measured
    counts for (carry_out, sum) over 1000 simulator shots.
    """
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError("""inputs must be integers.""" )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("""inputs must be positive.""" )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError("""inputs must be exact integers.""" )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("""inputs must be less or equal to 2.""" )
    # build registers
    quantum_register = qiskit.QuantumRegister(4 , """qr""" )
    classical_register = qiskit.ClassicalRegister(2 , """cr""" )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_register , classical_register )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , classical_register )  # measure the last two qbits
    backend = qiskit.Aer.get_backend("""aer_simulator""" )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
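    # Deterministic check: with basis inputs 1, 0, 1 the sum bit is 0 and the
    # carry-out is 1, so all 1000 shots land on "10" (carry_out, sum) — assuming
    # qiskit's aer_simulator backend is installed.
    print(f"Counts for inputs 1, 0, 1: {quantum_full_adder(1, 0, 1)}")  # expect {'10': 1000}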
| 43 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self : Union[str, Any] ):
        super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer( self : Optional[Any] , **kwargs : Union[str, Any] ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self : List[str] , **kwargs : List[str] ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : List[Any] , tokenizer : List[str] ):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer( self : int ):
        return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
    @cached_property
    def default_tokenizer_fast( self : Tuple ):
        return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def lowerCamelCase ( self : Any ):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors="""pt""" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
# Test that special tokens are reset
@require_torch
def lowerCamelCase ( self : Dict ):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
            # check if input_ids are returned and no labels
            self.assertIn("""input_ids""" , batch )
            self.assertIn("""attention_mask""" , batch )
            self.assertNotIn("""labels""" , batch )
            self.assertNotIn("""decoder_attention_mask""" , batch )
@require_torch
def lowerCamelCase ( self : Tuple ):
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase ( self : List[str] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=True , truncation=True , return_tensors="""pt""" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def lowerCamelCase ( self : str ):
        src_text = ["""A long paragraph for summarization."""]
        tgt_text = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , text_target=tgt_text , return_tensors="""pt""" )
            input_ids = inputs["""input_ids"""]
            labels = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase ( self : int ):
pass
def lowerCamelCase ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 43 | 1 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
def get_results ( output_dir : str ):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , """all_results.json""" )
    if os.path.exists(path ):
        with open(path , """r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'can\'t find {path}' )
    return results
def is_cuda_and_apex_available ( ):
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == """cuda"""
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( TestCasePlus ):
@classmethod
    def setUpClass ( cls ):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
    def tearDownClass ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_glue_no_trainer ( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_clm_no_trainer ( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_mlm_no_trainer ( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_ner_no_trainer ( self ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_squad_no_trainer ( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_swag_no_trainer ( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_summarization_no_trainer ( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_translation_no_trainer ( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """translation_no_trainer""" ) ) )
@slow
    def test_run_semantic_segmentation_no_trainer ( self ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_run_image_classification_no_trainer ( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowercase , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , """image_classification_no_trainer""" ) ) )
| 46 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
SCREAMING_SNAKE_CASE__ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
SCREAMING_SNAKE_CASE__ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    def _info ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 46 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
    data: int
    left: UpperCamelCase__ | None = None
    right: UpperCamelCase__ | None = None
CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
def lowercase (root ):
    """simple docstring"""
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The nodes number should be same as the number of coins' )
    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
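    # Hedged usage sketch (added): a root holding 3 coins whose two children hold none
    # needs exactly two moves, one coin passed down to each child.
    example_root = UpperCamelCase__(3 , UpperCamelCase__(0 ) , UpperCamelCase__(0 ) )
    assert lowercase(example_root ) == 2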
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
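# Hedged usage sketch (added as comments, since the relative imports above only resolve
# inside the library package):
#     cfg = UpperCamelCase__(n_embd=256)
#     cfg.hidden_size  # -> 256, resolved through attribute_map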
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def __a(postfix_notation : list ):
'''simple docstring'''
if not postfix_notation:
return 0
_lowerCAmelCase = {"+", "-", "*", "/"}
_lowerCAmelCase = []
for token in postfix_notation:
if token in operations:
_lowerCAmelCase , _lowerCAmelCase = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
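    # Hedged example (added): "2 3 + 4 *" is (2 + 3) * 4 in infix notation.
    assert __a(["2", "3", "+", "4", "*"] ) == 20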
| 158 |
'''simple docstring'''
def gcd(a : int , b : int ):
    '''simple docstring'''
    while a != 0:
        a , b = b % a, a
    return b
def mod_inverse(a : int , m : int ):
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = F'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
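# Hedged sanity checks (added): gcd(12, 18) is 6, and since 3 * 5 = 15 is congruent to
# 1 modulo 7, the modular inverse of 3 mod 7 is 5.
assert gcd(12 , 18 ) == 6
assert mod_inverse(3 , 7 ) == 5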
| 158 | 1 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def __SCREAMING_SNAKE_CASE ( year: int , month: int , day: int ) -> str:
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
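    # Hedged spot check (added): 2020-10-24 fell on a Saturday.
    assert __SCREAMING_SNAKE_CASE(2020 , 10 , 24 ) == 'Saturday'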
| 362 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'Pushing model and processor to the hub {vit_name}' )
        model.push_to_hub(F'ybelkada/{vit_name}' )
        processor.push_to_hub(F'ybelkada/{vit_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
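# Hedged CLI sketch (added): a typical invocation, assuming the timm checkpoint and the
# label file can be downloaded and the dump directory is writable:
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384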
| 189 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model ( self ):
'''simple docstring'''
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''')
        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32),
        }
        output = model(features)['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape , expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
                    [-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
                    [-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 91 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files was developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos (key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'''Number of resulting singleton clusters in the key '''
f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'''files, respectively''' )
return doc_coref_infos
def evaluate (key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} )
logger.info(
name.ljust(10 ) , f'Recall: {recall * 1_00:.2f}' , f' Precision: {precision * 1_00:.2f}' , f' F1: {fa * 1_00:.2f}' , )
if conll_subparts_num == 3:
        conll = (conll / 3) * 1_00
logger.info(f'CoNLL score: {conll:.2f}' )
output_scores.update({'''conll_score''': conll} )
return output_scores
def check_gold_parse_annotation (key_lines ):
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''')),
'''references''': datasets.Sequence(datasets.Value('''string''')),
}) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=True , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False):
'''simple docstring'''
        metrics = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 91 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 362 | '''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( nums: list[int] ):
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
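# Hedged example (added): picking the non-adjacent elements 1, 3 and 5 of
# [1, 2, 3, 4, 5] yields the maximum sum 9.
assert __UpperCAmelCase([1, 2, 3, 4, 5] ) == 9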
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source , target ):
    '''simple docstring'''
    return (abs(source - target ) / target) < 0.01
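# Hedged sanity checks (added): 100.5 is within 1% of 100, while 102 is not.
assert is_apercent_close(100.5 , 100 )
assert not is_apercent_close(102 , 100 )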
@pytest.mark.integration
def test_test_command ( dataset_loading_script_dir ):
    '''simple docstring'''
    args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir , '''README.md''' )
    assert os.path.exists(readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos['''default'''] , key ), getattr(expected_dataset_infos['''default'''] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 43 | from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate ( item , main_target ):
    '''simple docstring'''
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
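# Hedged sanity check (added): two of the four characters line up with the target.
assert evaluate("abcd" , "abXY" ) == ("abcd", 2.0)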
def crossover ( parent_a , parent_b ):
    '''simple docstring'''
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate ( child , genes ):
    '''simple docstring'''
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select ( parent_a , population_score , genes , ):
    '''simple docstring'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic(target, genes, debug=True):
    '''simple docstring'''
    if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population: {total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping a few of the best strings avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
    genes_list = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    generation, population, target = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 43 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : str = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'deberta-v2'
def __init__( self : Any , lowerCAmelCase_ : Tuple=12_81_00 , lowerCAmelCase_ : List[Any]=15_36 , lowerCAmelCase_ : Any=24 , lowerCAmelCase_ : List[str]=24 , lowerCAmelCase_ : List[Any]=61_44 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Dict=5_12 , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : Union[str, Any]=1e-7 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : List[Any]=-1 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : Dict="gelu" , **lowerCAmelCase_ : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
A__ : str =hidden_size
A__ : int =num_hidden_layers
A__ : List[Any] =num_attention_heads
A__ : List[Any] =intermediate_size
A__ : Union[str, Any] =hidden_act
A__ : List[str] =hidden_dropout_prob
A__ : Dict =attention_probs_dropout_prob
A__ : List[Any] =max_position_embeddings
A__ : Any =type_vocab_size
A__ : Dict =initializer_range
A__ : int =relative_attention
A__ : Any =max_relative_positions
A__ : Dict =pad_token_id
A__ : int =position_biased_input
# Backwards compatibility
if type(lowerCAmelCase_ ) == str:
A__ : Optional[int] =[x.strip() for x in pos_att_type.lower().split("""|""" )]
A__ : str =pos_att_type
A__ : Optional[Any] =vocab_size
A__ : Optional[Any] =layer_norm_eps
A__ : int =kwargs.get("""pooler_hidden_size""" , lowerCAmelCase_ )
A__ : Tuple =pooler_dropout
A__ : Any =pooler_hidden_act
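# Minimal usage sketch (relies on the public `transformers` API rather than the
# masked class above; the values are illustrative):
#
#   from transformers import DebertaV2Config, DebertaV2Model
#   config = DebertaV2Config(hidden_size=768, num_attention_heads=12, relative_attention=True)
#   model = DebertaV2Model(config)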
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
@property
def lowercase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A__ : Any ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A__ : List[Any] ={0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
return 12
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional["TensorType"] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 40 , lowerCAmelCase_ : int = 40 , lowerCAmelCase_ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
'''simple docstring'''
A__ : List[Any] =super().generate_dummy_inputs(preprocessor=lowerCAmelCase_ , framework=lowerCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs | 365 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __lowerCamelCase ( n : int ) -> int:
    """simple docstring"""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
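    # A self-contained trial-division cross-check of the Mobius function
    # (a minimal sketch; mu(1..10) = 1, -1, -1, 0, -1, 1, -1, 0, 0, 1).
    def _mobius_check(n: int) -> int:
        result, p = 1, 2
        while p * p <= n:
            if n % p == 0:
                n //= p
                if n % p == 0:
                    return 0  # n has a squared prime factor
                result = -result
            p += 1
        if n > 1:
            result = -result
        return result
    assert [_mobius_check(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]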
| 136 | 0 |
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
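# Sanity check from the Project Euler 114 problem statement: a row of length
# seven admits exactly seventeen valid fillings.
assert solution(7) == 17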
if __name__ == "__main__":
print(f'{solution() = }')
| 184 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, """utils""", """tf_ops""", """onnx.json""")) as f:
        onnx_opsets = json.load(f)["""opsets"""]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, """rb""") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'''Found the following incompatible ops for the opset {opset}:''')
        print(*incompatible_ops, sep="""\n""")
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
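# Programmatic usage sketch (the SavedModel path below is hypothetical):
#
#   onnx_compliancy("path/to/saved_model.pb", strict=False, opset=12)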
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 25 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Optional[int] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Tuple ='xlm-roberta'
def __init__( self, lowerCAmelCase=30_522, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=None, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =use_cache
lowerCamelCase_ =classifier_dropout
class __UpperCamelCase ( lowerCamelCase__ ):
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase_ ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase_ ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
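# A standalone illustration of the dynamic-axes mapping returned above for a
# non-multiple-choice task: both the batch and sequence dimensions stay dynamic
# in the exported ONNX graph.
_dynamic_axis = {0: 'batch', 1: 'sequence'}
_expected_onnx_inputs = OrderedDict([('input_ids', _dynamic_axis), ('attention_mask', _dynamic_axis)])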
| 6 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( args ):
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand ( BaseTransformersCLICommand ):
@staticmethod
    def lowercase__ ( parser ):
        """simple docstring"""
        download_parser = parser.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''', type=str, default=None, help='''Path to location to store the models''' )
        download_parser.add_argument(
            '''--force''', action='''store_true''', help='''Force the model to be downloaded even if already in cache-dir''' )
        download_parser.add_argument(
            '''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
        download_parser.add_argument('''model''', type=str, help='''Name of the model to download''' )
        download_parser.set_defaults(func=a_ )
    def __init__( self, model, cache, force, trust_remote_code ):
"""simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
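# Example shell invocation, using only the arguments registered above
# (the model name and cache path are illustrative):
#
#   transformers-cli download bert-base-uncased --cache-dir /tmp/hf_cache --force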
| 6 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase : str = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
A_ = ['''pixel_values''']
def __init__( self , __a = True , __a = 32 , __a=PILImageResampling.BILINEAR , __a = True , **__a , ):
'''simple docstring'''
__a : List[Any] = do_resize
__a : int = do_rescale
__a : Dict = size_divisor
__a : int = resample
super().__init__(**__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a ):
'''simple docstring'''
__a : Any = get_image_size(__a )
# Rounds the height and width down to the closest multiple of size_divisor
__a : Union[str, Any] = height // size_divisor * size_divisor
__a : List[str] = width // size_divisor * size_divisor
__a : Any = resize(__a , (new_h, new_w) , resample=__a , data_format=__a , **__a )
return image
def __UpperCAmelCase ( self , __a , __a , __a = None , **__a ):
'''simple docstring'''
return rescale(image=__a , scale=__a , data_format=__a , **__a )
def __UpperCAmelCase ( self , __a , __a = None , __a = None , __a=None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ):
'''simple docstring'''
__a : Tuple = do_resize if do_resize is not None else self.do_resize
__a : Dict = do_rescale if do_rescale is not None else self.do_rescale
__a : int = size_divisor if size_divisor is not None else self.size_divisor
__a : Union[str, Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
__a : Union[str, Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
__a : str = [to_numpy_array(__a ) for img in images]
if do_resize:
__a : Union[str, Any] = [self.resize(__a , size_divisor=__a , resample=__a ) for image in images]
if do_rescale:
__a : str = [self.rescale(__a , scale=1 / 255 ) for image in images]
__a : int = [to_channel_dimension_format(__a , __a ) for image in images]
__a : Dict = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
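# The resize step above snaps image dimensions down to the nearest multiple of
# `size_divisor`; a standalone illustration of that rounding rule:
_size_divisor = 32
_height, _width = 480, 650
assert _height // _size_divisor * _size_divisor == 480  # already a multiple
assert _width // _size_divisor * _size_divisor == 640   # rounded down from 650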
| 27 |
from math import factorial
class Dual :
    def __init__( self , real , rank ):
        '''simple docstring'''
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self ):
        '''simple docstring'''
        return (
            F'{self.real}+'
            F'{"+".join(str(dual )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
        )
    def __lowercase ( self ):
        '''simple docstring'''
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter dual list with zeros so both have the same length.
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([0] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([0] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ):
        '''simple docstring'''
        return self + other * -1
    def __mul__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ):
        '''simple docstring'''
        if n < 0 or isinstance(n , float ):
            raise ValueError("power must be a positive integer" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate( func , position , order ):
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
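# Quick cross-check of the dual-number machinery above: the first derivative of
# x**3 at x = 2 is 3 * 2**2 = 12, and dual arithmetic recovers it exactly.
assert differentiate(lambda x: x**3, 2, 1) == 12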
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f( y ):
        return y**2 * y**4
    print(differentiate(f, 9, 2))
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCamelCase__ = 'CompVis/stable-diffusion-v1-1'
UpperCamelCase__ = 'CompVis/stable-diffusion-v1-2'
UpperCamelCase__ = 'CompVis/stable-diffusion-v1-3'
UpperCamelCase__ = 'CompVis/stable-diffusion-v1-4'
class A ( UpperCAmelCase_ ):
def __init__(self : Union[str, Any] , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCAmelCase : StableDiffusionSafetyChecker , __UpperCAmelCase : CLIPImageProcessor , __UpperCAmelCase : bool = True , ) -> Tuple:
"""simple docstring"""
        super().__init__()
UpperCAmelCase__ = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = StableDiffusionPipeline(
vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , requires_safety_checker=__UpperCAmelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def lowercase_ (self : int ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __UpperCAmelCase ) for k in self.config.keys() if not k.startswith("_" )}
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> Optional[int]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCAmelCase )
def lowercase_ (self : int ) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(__UpperCAmelCase )
@torch.no_grad()
def lowercase_ (self : Optional[int] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_0 , __UpperCAmelCase : float = 7.5 , __UpperCAmelCase : Optional[Union[str, List[str]]] = None , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , **__UpperCAmelCase : Any , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , )
@torch.no_grad()
def lowercase_ (self : List[str] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_0 , __UpperCAmelCase : float = 7.5 , __UpperCAmelCase : Optional[Union[str, List[str]]] = None , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , **__UpperCAmelCase : Optional[Any] , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , )
@torch.no_grad()
def lowercase_ (self : List[str] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_0 , __UpperCAmelCase : float = 7.5 , __UpperCAmelCase : Optional[Union[str, List[str]]] = None , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , **__UpperCAmelCase : Any , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , )
@torch.no_grad()
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_0 , __UpperCAmelCase : float = 7.5 , __UpperCAmelCase : Optional[Union[str, List[str]]] = None , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , **__UpperCAmelCase : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , )
@torch.no_grad()
def lowercase_ (self : int , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_0 , __UpperCAmelCase : float = 7.5 , __UpperCAmelCase : Optional[Union[str, List[str]]] = None , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , **__UpperCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
self.to(__UpperCAmelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase__ = self.textaimg_sda_a(
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase__ = self.textaimg_sda_a(
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase__ = self.textaimg_sda_a(
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase__ = self.textaimg_sda_a(
prompt=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , **__UpperCAmelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 143 | from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Optional[int] = 'xlm-prophetnet'
__UpperCAmelCase : int = ['past_key_values']
__UpperCAmelCase : Optional[int] = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__(self : str , __UpperCAmelCase : Optional[float] = 0.1 , __UpperCAmelCase : Optional[Union[str, Callable]] = "gelu" , __UpperCAmelCase : Optional[int] = 3_0_5_2_2 , __UpperCAmelCase : Optional[int] = 1_0_2_4 , __UpperCAmelCase : Optional[int] = 4_0_9_6 , __UpperCAmelCase : Optional[int] = 1_2 , __UpperCAmelCase : Optional[int] = 1_6 , __UpperCAmelCase : Optional[int] = 4_0_9_6 , __UpperCAmelCase : Optional[int] = 1_2 , __UpperCAmelCase : Optional[int] = 1_6 , __UpperCAmelCase : Optional[float] = 0.1 , __UpperCAmelCase : Optional[float] = 0.1 , __UpperCAmelCase : Optional[int] = 5_1_2 , __UpperCAmelCase : Optional[float] = 0.02 , __UpperCAmelCase : Optional[bool] = True , __UpperCAmelCase : Optional[bool] = True , __UpperCAmelCase : Optional[int] = 0 , __UpperCAmelCase : Optional[int] = 2 , __UpperCAmelCase : Optional[int] = 3_2 , __UpperCAmelCase : Optional[int] = 1_2_8 , __UpperCAmelCase : Optional[bool] = False , __UpperCAmelCase : Optional[float] = 0.0 , __UpperCAmelCase : Optional[bool] = True , __UpperCAmelCase : Optional[int] = 0 , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : Optional[int] = 2 , **__UpperCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = encoder_ffn_dim
UpperCAmelCase__ = num_encoder_layers
UpperCAmelCase__ = num_encoder_attention_heads
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = num_decoder_layers
UpperCAmelCase__ = num_decoder_attention_heads
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = init_std # Normal(0, this parameter)
UpperCAmelCase__ = activation_function
# parameters for xlmprophetnet
UpperCAmelCase__ = ngram
UpperCAmelCase__ = num_buckets
UpperCAmelCase__ = relative_max_distance
UpperCAmelCase__ = disable_ngram_loss
UpperCAmelCase__ = eps
# 3 Types of Dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = dropout
UpperCAmelCase__ = use_cache
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , add_cross_attention=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
@property
def lowercase_ (self : int ) -> int:
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def lowercase_ (self : Any , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 143 | 1 |
from __future__ import annotations
import math
_snake_case = "2020.9.26"
_snake_case = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_ad( x , y , z , scale , distance ):
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
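# Standalone numeric check of the perspective projection above, using the same
# inputs as the demo at the bottom of this file: both coordinates are scaled by
# distance / (z + distance) = 10 / 13.
_px, _py = convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0)
assert math.isclose(_px, 100 / 13) and math.isclose(_py, 200 / 13)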
def rotate( x , y , z , axis , angle ):
    if not isinstance(axis , str ):
        raise TypeError("""Axis must be a str""" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            """Input values except axis must either be float or int: """
            f'''{list(input_variables.values() )}'''
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 9_0.0) = }""")
| 26 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_a = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_a = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
_a = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=False ):
__lowercase = spearmanr(UpperCAmelCase__, UpperCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 17 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["flax", "transformers"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = ["flax", "transformers"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = ["flax", "transformers"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["flax", "transformers"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""flax""", """transformers"""] )
| 126 | """simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A :
def __init__( self , a__ , a__=2 , a__=3 , a__=4 , a__=2 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=36 , a__=3 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=6 , a__=6 , a__=3 , a__=4 , a__=None , a__=1000 , ):
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : str = image_size
_lowerCAmelCase : str = patch_size
_lowerCAmelCase : str = text_seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : List[str] = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : Optional[Any] = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Dict = coordinate_size
_lowerCAmelCase : Optional[int] = shape_size
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase : Optional[int] = text_seq_length
_lowerCAmelCase : Any = (image_size // patch_size) ** 2 + 1
_lowerCAmelCase : Any = self.text_seq_length + self.image_seq_length
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Optional[Any] = bbox[i, j, 3]
_lowerCAmelCase : List[str] = bbox[i, j, 1]
_lowerCAmelCase : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : int = bbox[i, j, 2]
_lowerCAmelCase : Optional[int] = bbox[i, j, 0]
_lowerCAmelCase : Optional[int] = t
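        # A vectorized equivalent of the swap loop above (illustrative sketch):
        #   x_min = torch.minimum(bbox[..., 0], bbox[..., 2])
        #   x_max = torch.maximum(bbox[..., 0], bbox[..., 2])
        #   y_min = torch.minimum(bbox[..., 1], bbox[..., 3])
        #   y_max = torch.maximum(bbox[..., 1], bbox[..., 3])
        #   bbox = torch.stack([x_min, y_min, x_max, y_max], dim=-1)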
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_input_mask:
_lowerCAmelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowerCAmelCase : int = None
_lowerCAmelCase : int = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowerCAmelCase : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = LayoutLMvaModel(config=a__ )
model.to(a__ )
model.eval()
# text + image
_lowerCAmelCase : Optional[Any] = model(a__ , pixel_values=a__ )
_lowerCAmelCase : Any = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ )
_lowerCAmelCase : List[Any] = model(a__ , bbox=a__ , pixel_values=a__ , token_type_ids=a__ )
_lowerCAmelCase : Tuple = model(a__ , bbox=a__ , pixel_values=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCAmelCase : Dict = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCAmelCase : Optional[Any] = model(pixel_values=a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = LayoutLMvaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Tuple = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self.num_labels
_lowerCAmelCase : str = LayoutLMvaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[str] = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Dict = LayoutLMvaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : str = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
_UpperCamelCase : Any = False
_UpperCamelCase : int = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __A ( self ):
_lowerCAmelCase : Any = LayoutLMvaModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self , a__ , a__ , a__=False ):
_lowerCAmelCase : List[str] = copy.deepcopy(a__ )
if model_class in get_values(a__ ):
_lowerCAmelCase : Optional[int] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a__ ):
_lowerCAmelCase : List[Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in get_values(a__ ):
_lowerCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
_lowerCAmelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in [
*get_values(a__ ),
]:
_lowerCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in [
*get_values(a__ ),
]:
_lowerCAmelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a__ , )
return inputs_dict
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : int = type
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def __A ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = LayoutLMvaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return LayoutLMvaImageProcessor(apply_ocr=a__ ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(a__ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=a__ , return_tensors="""pt""" ).pixel_values.to(a__ )
_lowerCAmelCase : Optional[Any] = torch.tensor([[1, 2]] )
_lowerCAmelCase : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCAmelCase : str = model(
input_ids=input_ids.to(a__ ) , bbox=bbox.to(a__ ) , pixel_values=pixel_values.to(a__ ) , )
        # verify the last hidden states
_lowerCAmelCase : Optional[int] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a__ )
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-4 ) )
| 126 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase : Tuple = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase ):
"""simple docstring"""
    def setUp(self) -> None:
        '''simple docstring'''
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/'))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/transformers/models/bert/modeling_bert.py') , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py') , )

    def tearDown(self) -> None:
        '''simple docstring'''
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self , comment , class_name , class_code , overwrite_result=None) -> None:
        '''simple docstring'''
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119)
        code = black.format_str(code , mode=mode)
        fname = os.path.join(self.transformer_dir , 'new_code.py')
        with open(fname , 'w' , newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True)
            with open(fname , 'r') as f:
                self.assertTrue(f.read() , expected)
    def test_find_code_in_transformers(self) -> None:
        '''simple docstring'''
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code , REFERENCE_CODE)
    def test_copy_consistency(self) -> None:
        '''simple docstring'''
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , REFERENCE_CODE) , )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub('Bert' , long_class_name , REFERENCE_CODE) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , REFERENCE_CODE , overwrite_result=re.sub('Bert' , 'TestModel' , REFERENCE_CODE) , )
    def test_convert_to_localized_md(self) -> None:
        '''simple docstring'''
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme['format_model_list'])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list , converted_md_list_sample)

        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list , localized_readme['format_model_list'])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme['format_model_list'])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample)
| 99 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
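
# Illustrative usage of the aliases above (added example; the alias names are
# reconstructed from context and may differ in the original source):
def first_item(items: ListLike[int]) -> int:
    return items[0]

# first_item([1, 2, 3]) == 1 and first_item((4, 5)) == 4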
| 136 | 0 |
def heaps(arr: list) -> list:
    '''simple docstring'''
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
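
# Added sanity check (not in the original script): Heap's algorithm returns
# all n! permutations, beginning with the untouched input ordering.
if __name__ == "__main__":
    example_perms = heaps([1, 2, 3])
    assert len(example_perms) == 6
    assert example_perms[0] == (1, 2, 3)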
| 357 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline(DiffusionPipeline ):
    def __init__(self , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , requires_safety_checker: bool = True , ) -> None:
        """simple docstring"""
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )

        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def components(self ) -> Dict[str, Any]:
        """simple docstring"""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
    def enable_attention_slicing(self , slice_size: Optional[Union[str, int]] = "auto" ) -> None:
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing(self ) -> None:
        """simple docstring"""
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1(self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_2(self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_3(self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_4(self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__(self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device )

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
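
# Added usage sketch (assumption: this community pipeline is loadable through
# DiffusionPipeline's custom_pipeline mechanism, and the identifier below is
# illustrative; the four checkpoints are downloaded from the Hub):
#
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
# )
# output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
# images = output.images  # one image per checkpoint v1-1 ... v1-4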
| 143 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig ):
    model_type = '''xlm-roberta'''

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        '''simple docstring'''

        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''

        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
            ] )
| 6 |
def neville_interpolate(x_points: list , y_points: list , x0: int ) -> list:
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]

    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
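
# Added worked example (not in the original module): the points (1, 6), (2, 11),
# (3, 18), (4, 27) lie on y = x**2 + 2*x + 3, so interpolating at x0 = 5 must
# give 25 + 10 + 3 = 38 up to floating-point rounding.
if __name__ == "__main__":
    value, _table = neville_interpolate([1, 2, 3, 4], [6, 11, 18, 27], 5)
    assert abs(value - 38) < 1e-9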
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self ) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs(self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )

    def _switch_to_input_mode(self ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode(self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def set_tgt_lang_special_tokens(self , tgt_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )

        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )

        return (out_vocab_file,)
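
# Added usage sketch (not in the original module): NLLB is a many-to-many
# translation tokenizer, so the source and target languages are fixed up front.
# Running this downloads the tokenizer files from the Hugging Face Hub.
if __name__ == "__main__":
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
    print(inputs["input_ids"])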
| 364 |
'''simple docstring'''
def twos_complement(number: int ) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
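
# Added worked example (not in the original module): -5 in a 4-bit two's
# complement encoding is 16 - 5 = 11 = 0b1011, and 0 encodes as the single bit 0.
if __name__ == "__main__":
    assert twos_complement(-5) == "0b1011"
    assert twos_complement(0) == "0b0"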
if __name__ == "__main__":
import doctest
doctest.testmod()
| 142 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list , train_usr: list , train_mtch: list , test_dt: list , test_mtch: list ) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
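
# Added note: beta above is the closed-form ordinary-least-squares solution of
# the normal equations, beta = (X^T X)^{-1} X^T y; np.linalg.lstsq computes the
# same fit in a more numerically stable way. Illustrative call (made-up values):
# linear_regression_prediction([2, 3, 4, 5], [5, 3, 4, 6], [3, 1, 2, 4], [2], [2])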
def sarimax_predictor(train_user: list , train_match: list , test_match: list ) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=600 , method='nm' )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def support_vector_regressor(x_train: list , x_test: list , train_user: list ) -> float:
    regressor = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def interquartile_range_checker(train_user: list ) -> float:
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list , actual_result: float ) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
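
# Added example (not in the original script): two of the three votes below fall
# within the 0.1 tolerance of the actual value, so the checker reports "safe".
if __name__ == "__main__":
    assert data_safety_checker([20.5, 20.4, 21.0], 20.5) is True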
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['''total_user''', '''total_even''', '''days''']
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 143 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )

    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_graph_mode_with_inputs_embeds(self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        model = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self ):
        '''simple docstring'''
        model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )

        # forward pass
        outputs = model(**inputs )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836] )

        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
| 143 | 1 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print('''Welcome!''' )
    yield
    print('''Bye!''' )
@contextlib.contextmanager
def context_fr():
    print('''Bonjour!''' )
    yield
    print('''Au revoir!''' )
class TestImportMechanisms(unittest.TestCase ):
    def test_module_spec_available(self ):
        """simple docstring"""
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('''transformers''' ) is not None
class GenericUtilTests(unittest.TestCase ):
    @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_no_context(self , mock_stdout ):
        """simple docstring"""
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Any ):
"""simple docstring"""
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Optional[int] ):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(find_labels(a ) , ['''labels'''] )
self.assertEqual(find_labels(a ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(a ) , ['''start_positions''', '''end_positions'''] )
class a__ ( UpperCAmelCase__ ):
pass
self.assertEqual(find_labels(a ) , ['''labels'''] )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(find_labels(a ) , ['''labels'''] )
self.assertEqual(find_labels(a ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(a ) , ['''start_positions''', '''end_positions'''] )
class a__ ( UpperCAmelCase__ ):
pass
self.assertEqual(find_labels(a ) , ['''labels'''] )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(find_labels(a ) , [] )
self.assertEqual(find_labels(a ) , [] )
self.assertEqual(find_labels(a ) , [] )
class a__ ( UpperCAmelCase__ ):
pass
self.assertEqual(find_labels(a ) , [] )
| 237 |
'''simple docstring'''
def upper(word: str ) -> str:
    return "".join(chr(ord(char ) - 32 ) if '''a''' <= char <= '''z''' else char for char in word )
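
# Added example (not in the original module): only ASCII lowercase letters are
# shifted by 32 code points; digits and punctuation pass through unchanged.
if __name__ == "__main__":
    assert upper("hello, world 123") == "HELLO, WORLD 123"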
if __name__ == "__main__":
from doctest import testmod
testmod()
| 237 | 1 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
    def get_scheduler_config(self , **kwargs ):
        """simple docstring"""
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }

        config.update(**kwargs )
        return config
    def full_loop(self , **config ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps , eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps )

        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample

        return sample
    def test_timesteps(self ):
        """simple docstring"""
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_steps_offset(self ):
        """simple docstring"""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )

    def test_betas(self ):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self ):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type(self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_clip_sample(self ):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_timestep_spacing(self ):
        """simple docstring"""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )

    def test_rescale_betas_zero_snr(self ):
        """simple docstring"""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )

    def test_thresholding(self ):
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_time_indices(self ):
        """simple docstring"""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )

    def test_inference_steps(self ):
        """simple docstring"""
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )

    def test_eta(self ):
        """simple docstring"""
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
    def test_variance(self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
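
    # Added note: the variance exercised above is the DDIM posterior variance,
    #   sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev),
    # evaluated at (t, prev_t) pairs such as (420, 400) under the default beta schedule.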
    def test_batch_step_no_noise(self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )

        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )

        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )

        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =self.full_loop()
lowerCamelCase__ : List[str] =torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : Tuple =torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : str =self.full_loop(prediction_type='v_prediction' )
lowerCamelCase__ : Optional[int] =torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : Optional[int] =torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : str =self.full_loop(set_alpha_to_one=lowerCamelCase_ , beta_start=0.01 )
lowerCamelCase__ : Union[str, Any] =torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : List[str] =torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =self.full_loop(set_alpha_to_one=lowerCamelCase_ , beta_start=0.01 )
lowerCamelCase__ : Any =torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : List[str] =torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
        assert abs(result_mean.item() - 0.19_41 ) < 1e-3
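# Editorial note (an assumption about the scheduler under test, consistent with
# the asserts above): in diffusers' DDIM-style schedulers, _get_variance(t, prev_t)
# is computed from the cumulative alpha products as
#     (1 - alpha_prod_prev) / (1 - alpha_prod) * (1 - alpha_prod / alpha_prod_prev)
# which is why _get_variance(0, 0) collapses to exactly 0.0 in the checks above.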
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase = logging.get_logger(__name__)
class A_ ( BaseImageProcessor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""pixel_values"""]
def __init__( self :Union[str, Any] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Dict[str, int] = None , lowerCamelCase_ :int = 0.9 , lowerCamelCase_ :PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase_ :bool = True , lowerCamelCase_ :Dict[str, int] = None , lowerCamelCase_ :Union[int, float] = 1 / 255 , lowerCamelCase_ :bool = True , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Union[float, List[float]]] = None , lowerCamelCase_ :Optional[Union[float, List[float]]] = None , **lowerCamelCase_ :Tuple , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : str =size if size is not None else {'shortest_edge': 224}
lowerCamelCase__ : List[str] =get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase__ : str =get_size_dict(lowerCamelCase_ , param_name='crop_size' )
lowerCamelCase__ : Tuple =do_resize
lowerCamelCase__ : List[Any] =size
lowerCamelCase__ : List[str] =crop_pct
lowerCamelCase__ : Union[str, Any] =resample
lowerCamelCase__ : List[str] =do_center_crop
lowerCamelCase__ : List[str] =crop_size
lowerCamelCase__ : List[Any] =do_rescale
lowerCamelCase__ : List[str] =rescale_factor
lowerCamelCase__ : Tuple =do_normalize
lowerCamelCase__ : int =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase__ : List[Any] =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :np.ndarray , lowerCamelCase_ :Dict[str, int] , lowerCamelCase_ :Optional[float] = None , lowerCamelCase_ :PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase_ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ :Any , ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCamelCase__ : Optional[int] =int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCamelCase__ : Union[str, Any] =int(size['height'] / crop_pct )
else:
lowerCamelCase__ : Any =(int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(lowerCamelCase_ ) )
lowerCamelCase__ : Tuple =get_resize_output_image_size(lowerCamelCase_ , size=lowerCamelCase_ , default_to_square=lowerCamelCase_ )
else:
if "shortest_edge" in size:
lowerCamelCase__ : str =get_resize_output_image_size(lowerCamelCase_ , size=size['shortest_edge'] , default_to_square=lowerCamelCase_ )
elif "height" in size and "width" in size:
lowerCamelCase__ : Union[str, Any] =(size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(lowerCamelCase_ ) )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :np.ndarray , lowerCamelCase_ :Dict[str, int] , lowerCamelCase_ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ :str , ):
"""simple docstring"""
lowerCamelCase__ : Tuple =get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(lowerCamelCase_ , size=(size['height'], size['width']) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__ ( self :int , lowerCamelCase_ :np.ndarray , lowerCamelCase_ :Union[int, float] , lowerCamelCase_ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ :List[str] , ):
"""simple docstring"""
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :np.ndarray , lowerCamelCase_ :Union[float, List[float]] , lowerCamelCase_ :Union[float, List[float]] , lowerCamelCase_ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ :Tuple , ):
"""simple docstring"""
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :ImageInput , lowerCamelCase_ :bool = None , lowerCamelCase_ :Dict[str, int] = None , lowerCamelCase_ :int = None , lowerCamelCase_ :PILImageResampling = None , lowerCamelCase_ :bool = None , lowerCamelCase_ :Dict[str, int] = None , lowerCamelCase_ :bool = None , lowerCamelCase_ :float = None , lowerCamelCase_ :bool = None , lowerCamelCase_ :Optional[Union[float, List[float]]] = None , lowerCamelCase_ :Optional[Union[float, List[float]]] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase_ :List[str] , ):
"""simple docstring"""
lowerCamelCase__ : Dict =do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Union[str, Any] =crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase__ : Tuple =resample if resample is not None else self.resample
lowerCamelCase__ : Any =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : Optional[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : Optional[int] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Optional[Any] =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : List[str] =image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : List[Any] =image_std if image_std is not None else self.image_std
lowerCamelCase__ : int =size if size is not None else self.size
lowerCamelCase__ : Tuple =get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
lowerCamelCase__ : Dict =crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : str =get_size_dict(lowerCamelCase_ , param_name='crop_size' )
lowerCamelCase__ : Dict =make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase__ : List[str] =[to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
lowerCamelCase__ : Tuple =[self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , crop_pct=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
lowerCamelCase__ : Union[str, Any] =[self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_ ) for image in images]
if do_rescale:
lowerCamelCase__ : str =[self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
lowerCamelCase__ : Optional[Any] =[self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
lowerCamelCase__ : Optional[Any] =[to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
lowerCamelCase__ : List[str] ={'pixel_values': images}
        return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
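# Worked example of the crop_pct path above (illustrative, not part of the
# original file): with the defaults size={"shortest_edge": 224}, crop_pct=0.9
# and crop_size={"height": 224, "width": 224}, resize() first scales the
# shorter image side to int(224 / 0.9) == 248, after which center_crop() takes
# the central 224x224 window -- the usual "resize slightly larger, then crop"
# evaluation recipe.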
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : int = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class a__ ( PretrainedConfig ):
a : Optional[Any] = """sew-d"""
def __init__( self , A=32 , A=768 , A=12 , A=12 , A=3072 , A=2 , A=512 , A=256 , A=True , A=True , A=("p2c", "c2p") , A="layer_norm" , A="gelu_python" , A=0.1 , A=0.1 , A=0.1 , A=0.0 , A=0.1 , A=0.0_2 , A=1e-7 , A=1e-5 , A="group" , A="gelu" , A=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A=False , A=128 , A=16 , A=True , A=0.0_5 , A=10 , A=2 , A=0.0 , A=10 , A=0 , A="mean" , A=False , A=False , A=256 , A=0 , A=1 , A=2 , **A , ) -> Dict:
'''simple docstring'''
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
a = hidden_size
a = feat_extract_norm
a = feat_extract_activation
a = list(A )
a = list(A )
a = list(A )
a = conv_bias
a = num_conv_pos_embeddings
a = num_conv_pos_embedding_groups
a = len(self.conv_dim )
a = num_hidden_layers
a = intermediate_size
a = squeeze_factor
a = max_position_embeddings
a = position_buckets
a = share_att_key
a = relative_attention
a = norm_rel_ebd
a = list(A )
a = hidden_act
a = num_attention_heads
a = hidden_dropout
a = attention_dropout
a = activation_dropout
a = feat_proj_dropout
a = final_dropout
a = layer_norm_eps
a = feature_layer_norm_eps
a = initializer_range
a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a = apply_spec_augment
a = mask_time_prob
a = mask_time_length
a = mask_time_min_masks
a = mask_feature_prob
a = mask_feature_length
a = mask_feature_min_masks
# ctc loss
a = ctc_loss_reduction
a = ctc_zero_infinity
# sequence classification
a = use_weighted_layer_sum
a = classifier_proj_size
@property
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
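        # Worked example for the property above (illustrative): with the default
        # conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the reduce() yields
        # 5 * 2**6 == 320, i.e. one output frame per 320 input samples -- 20 ms
        # of audio at the 16 kHz sampling rate these speech models typically
        # assume.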
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a__ :
def __init__( self , A , A=2 , A=32 , A=16 , A=3 , A=True , A=True , A=32 , A=4 , A=[0, 1, 2, 3] , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=0.0_2 , A=3 , A=[1, 384, 24, 24] , A=True , A=None , ) -> Any:
'''simple docstring'''
a = parent
a = batch_size
a = image_size
a = patch_size
a = num_channels
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = backbone_out_indices
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = num_labels
a = backbone_featmap_shape
a = scope
a = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
a = (image_size // patch_size) ** 2
a = num_patches + 1
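        # Illustrative arithmetic for the two lines above: with the defaults
        # image_size=32 and patch_size=16, num_patches = (32 // 16) ** 2 == 4,
        # so the expected sequence length is 4 + 1 == 5 (the +1 is the [CLS]
        # token).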
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=A , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowerCAmelCase_ ( self , A , A , A ) -> str:
'''simple docstring'''
a = DPTModel(config=A )
model.to(A )
model.eval()
a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , A , A , A ) -> Optional[int]:
'''simple docstring'''
a = self.num_labels
a = DPTForDepthEstimation(A )
model.to(A )
model.eval()
a = model(A )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self , A , A , A ) -> Dict:
'''simple docstring'''
a = self.num_labels
a = DPTForSemanticSegmentation(A )
model.to(A )
model.eval()
a = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a : Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : Union[str, Any] = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : Optional[int] = False
a : List[Any] = False
a : int = False
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
a = DPTModelTester(self )
a = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
pass
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(A )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*A )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
if model_class in get_values(A ):
continue
a = model_class(A )
model.to(A )
model.train()
a = self._prepare_for_class(A , A , return_labels=A )
a = model(**A ).loss
loss.backward()
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = False
a = True
if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing:
continue
a = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
a = self._prepare_for_class(A , A , return_labels=A )
a = model(**A ).loss
loss.backward()
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = _config_zero_init(A )
for model_class in self.all_model_classes:
a = model_class(config=A )
# Skip the check for the backbone
a = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
a = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@slow
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
a = DPTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = "add"
with self.assertRaises(A ):
a = DPTForDepthEstimation(A )
def SCREAMING_SNAKE_CASE ( ) -> str:
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
@slow
class a__ ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
a = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
a = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(A )
a = prepare_img()
a = image_processor(images=A , return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
a = model(**A )
a = outputs.predicted_depth
# verify the predicted depth
a = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , A )
a = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(A )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , A , atol=1e-4 ) )
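        # Editorial reading (assumption): predicted_depth is divided by 100
        # only to bring the values into a range where atol=1e-4 is meaningful;
        # the (1, 384, 384) shape asserted above is one relative-depth map per
        # image at the model's 384x384 input resolution.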
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # two-pointer scan over a sorted array: move inward from both ends
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        if nums[i] + nums[j] < target:
            i += 1
        else:
            j -= 1
    return []
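# Editorial aside: the scan above relies on `nums` being sorted in ascending
# order. For unsorted input, a one-pass hash map is the usual alternative --
# a minimal sketch (not part of the original file):
#
#   def two_sum_unsorted(nums: list[int], target: int) -> list[int]:
#       seen: dict[int, int] = {}
#       for idx, value in enumerate(nums):
#           if target - value in seen:
#               return [seen[target - value], idx]
#           seen[value] = idx
#       return []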
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{two_pointer([2, 7, 11, 15], 9) = }")
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ , snake_case__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=__UpperCamelCase , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ : Optional[Any] = controlnet_params
snake_case__ : Any = 'bird'
snake_case__ : Any = jax.device_count()
snake_case__ : Any = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
snake_case__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ : Any = jax.random.PRNGKey(0 )
snake_case__ : Dict = jax.random.split(__UpperCamelCase , jax.device_count() )
snake_case__ : Any = replicate(__UpperCamelCase )
snake_case__ : Union[str, Any] = shard(__UpperCamelCase )
snake_case__ : Any = shard(__UpperCamelCase )
snake_case__ : Dict = pipe(
prompt_ids=__UpperCamelCase , image=__UpperCamelCase , params=__UpperCamelCase , prng_seed=__UpperCamelCase , num_inference_steps=50 , jit=__UpperCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ : Optional[int] = images[0, 253:256, 253:256, -1]
snake_case__ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ : str = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ , snake_case__ : Union[str, Any] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ , snake_case__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=__UpperCamelCase , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ : List[str] = controlnet_params
snake_case__ : Optional[Any] = 'Chef in the kitchen'
snake_case__ : List[Any] = jax.device_count()
snake_case__ : int = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
snake_case__ : int = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ : Optional[Any] = jax.random.PRNGKey(0 )
snake_case__ : Any = jax.random.split(__UpperCamelCase , jax.device_count() )
snake_case__ : List[Any] = replicate(__UpperCamelCase )
snake_case__ : List[str] = shard(__UpperCamelCase )
snake_case__ : Optional[int] = shard(__UpperCamelCase )
snake_case__ : Any = pipe(
prompt_ids=__UpperCamelCase , image=__UpperCamelCase , params=__UpperCamelCase , prng_seed=__UpperCamelCase , num_inference_steps=50 , jit=__UpperCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ : Optional[int] = images[0, 253:256, 253:256, -1]
snake_case__ : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ : Any = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
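        # Editorial note on the data-parallel pattern above: flax.jax_utils'
        # replicate() copies the params pytree to every local device, shard()
        # splits the leading batch axis across devices, and splitting the PRNG
        # key with jax.random.split(rng, jax.device_count()) gives each device
        # its own randomness -- hence the leading jax.device_count() axis in
        # the output shapes asserted above.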
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
def UpperCAmelCase_ ( __lowercase : int ) -> Any:
'''simple docstring'''
_UpperCAmelCase = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
_UpperCAmelCase = DetaConfig(
backbone_config=__lowercase , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=__lowercase , with_box_refine=__lowercase , two_stage=__lowercase , )
# set labels
_UpperCAmelCase = "huggingface/label-files"
if "o365" in model_name:
_UpperCAmelCase = 366
_UpperCAmelCase = "object365-id2label.json"
else:
_UpperCAmelCase = 91
_UpperCAmelCase = "coco-detection-id2label.json"
_UpperCAmelCase = num_labels
_UpperCAmelCase = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type="dataset" ) ) , "r" ) )
_UpperCAmelCase = {int(__lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( __lowercase : int ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Any ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = dct.pop(__lowercase )
_UpperCAmelCase = val
def UpperCAmelCase_ ( __lowercase : List[Any] , __lowercase : List[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
_UpperCAmelCase = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:dim, :]
_UpperCAmelCase = in_proj_bias[: dim]
_UpperCAmelCase = in_proj_weight[
dim : dim * 2, :
]
_UpperCAmelCase = in_proj_bias[
dim : dim * 2
]
_UpperCAmelCase = in_proj_weight[
-dim :, :
]
_UpperCAmelCase = in_proj_bias[-dim :]
# fmt: on
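# Illustrative shape note for the split above: Swin stores a fused qkv
# projection of shape (3 * dim, dim), so slicing rows [:dim], [dim : 2 * dim]
# and [-dim:] recovers the separate query, key and value weights (and the
# matching slices of the bias) that the converted checkpoint expects.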
def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_UpperCAmelCase = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_UpperCAmelCase = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:hidden_size, :]
_UpperCAmelCase = in_proj_bias[:hidden_size]
_UpperCAmelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase = in_proj_weight[-hidden_size:, :]
_UpperCAmelCase = in_proj_bias[-hidden_size:]
def UpperCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Union[str, Any] , __lowercase : int ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = get_deta_config(__lowercase )
# load original state dict
if model_name == "deta-swin-large":
_UpperCAmelCase = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
_UpperCAmelCase = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(f'Model name {model_name} not supported' )
_UpperCAmelCase = torch.load(__lowercase , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
_UpperCAmelCase = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_swin_q_k_v(__lowercase , config.backbone_config )
read_in_decoder_q_k_v(__lowercase , __lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_UpperCAmelCase = state_dict.pop(__lowercase )
_UpperCAmelCase = val
if "input_proj" in key:
_UpperCAmelCase = state_dict.pop(__lowercase )
_UpperCAmelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_UpperCAmelCase = state_dict.pop(__lowercase )
_UpperCAmelCase = val
# finally, create HuggingFace model and load state dict
_UpperCAmelCase = DetaForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
_UpperCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
model.to(__lowercase )
# load image processor
_UpperCAmelCase = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = processor(images=__lowercase , return_tensors="pt" )
_UpperCAmelCase = encoding["pixel_values"]
_UpperCAmelCase = model(pixel_values.to(__lowercase ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_UpperCAmelCase = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_UpperCAmelCase = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_UpperCAmelCase = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_UpperCAmelCase = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__SCREAMING_SNAKE_CASE :Tuple = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
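# Example invocation (illustrative; the script filename and output path are
# placeholders, not taken from the original file):
#
#   python convert_deta_checkpoint.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large-converted \
#       --push_to_hub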
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Optional[int] , snake_case_ : Dict , snake_case_ : int=1_3 , snake_case_ : int=7 , snake_case_ : Union[str, Any]=True , snake_case_ : int=True , snake_case_ : Tuple=True , snake_case_ : Optional[Any]=True , snake_case_ : int=9_9 , snake_case_ : Tuple=3_2 , snake_case_ : Dict=5 , snake_case_ : str=4 , snake_case_ : Union[str, Any]=3_7 , snake_case_ : Dict="gelu" , snake_case_ : Any=0.1 , snake_case_ : Optional[Any]=0.1 , snake_case_ : List[Any]=5_1_2 , snake_case_ : List[Any]=1_6 , snake_case_ : List[Any]=2 , snake_case_ : Any=0.0_2 , snake_case_ : List[str]=False , snake_case_ : Dict=True , snake_case_ : Union[str, Any]="None" , snake_case_ : Dict=3 , snake_case_ : Union[str, Any]=4 , snake_case_ : Dict=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = relative_attention
_UpperCAmelCase = position_biased_input
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = scope
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Optional[Any] ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase ( self : Optional[int] , snake_case_ : Dict ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase ( self : str , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] ):
_UpperCAmelCase = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ )[0]
_UpperCAmelCase = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase ( self : Optional[int] , snake_case_ : str , snake_case_ : int , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
_UpperCAmelCase = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def lowercase ( self : Any , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : List[Any] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : Any ):
_UpperCAmelCase = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str ):
_UpperCAmelCase = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
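        # Shape note (illustrative): unsqueeze(1).expand(-1, num_choices, -1)
        # broadcasts (batch, seq_len) inputs to (batch, num_choices, seq_len)
        # so every candidate is scored with shared weights; .contiguous()
        # materialises the expanded view before the model flattens it.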
def lowercase ( self : str ):
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Tuple = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : Any = True
_lowerCamelCase : int = False
_lowerCamelCase : int = False
_lowerCamelCase : Dict = False
_lowerCamelCase : int = False
def lowercase ( self : List[str] ):
_UpperCAmelCase = DebertaVaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
def lowercase ( self : str ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def lowercase ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def lowercase ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def lowercase ( self : Union[str, Any] ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def lowercase ( self : Union[str, Any] ):
pass
@slow
def lowercase ( self : List[str] ):
_UpperCAmelCase = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
_UpperCAmelCase = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
_UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
_UpperCAmelCase = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # suppress TensorFlow C++ log output
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCamelCase ( *A : Dict , **A : Optional[int] ) ->Dict:
pass
@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
_UpperCAmelCase : Optional[int] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self : Any , A : List[str] , A : Tuple , A : List[str] ) ->List[Any]:
lowerCamelCase__ : List[str] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCamelCase__ : Union[str, Any] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __lowerCamelCase ( self : List[Any] , A : Optional[int] , A : Tuple ) ->Optional[Any]:
lowerCamelCase__ : str = object_detector(examples[0] , threshold=0.0 )
lowerCamelCase__ : Union[str, Any] = len(A )
self.assertGreater(A , 0 )
self.assertEqual(
A , [
{
'''score''': ANY(A ),
'''label''': ANY(A ),
'''box''': {'''xmin''': ANY(A ), '''ymin''': ANY(A ), '''xmax''': ANY(A ), '''ymax''': ANY(A )},
}
for i in range(A )
] , )
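        # Editorial note (assumption about the shared test utilities): ANY(t)
        # is a loose matcher whose __eq__ returns True for any value of type t,
        # so the assertion above checks the result schema (score/label/box keys
        # and their types) without pinning exact detection values.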
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : Dict ) ->List[Any]:
pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        outputs = object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                }
            ] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
        outputs = object_detector(
            [
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
            ] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
| 142 | 0 |
import qiskit
def half_adder(bit_a: int, bit_b: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit_a == 1:
        qc_ha.x(0 )
    if bit_b == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
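
# Added sketch (not part of the original script): a classical reference for the
# circuit above. Classical bit 0 holds the XOR (sum) and classical bit 1 the
# AND (carry), so get_counts returns bitstrings ordered "<carry><sum>".
def classical_half_adder(bit_a: int, bit_b: int) -> str:
    carry, total = bit_a & bit_b, bit_a ^ bit_b
    return f"{carry}{total}"  # e.g. classical_half_adder(1, 1) == "10"
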
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'''Half Adder Output Qubit Counts: {counts}''')
| 370 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16, overwriting src_path unless save_path is given.'''
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()  # downcast each tensor to fp16
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
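
# Usage sketch (added; the script name is illustrative):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# fire.Fire(convert) below turns convert's signature into the command line.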
if __name__ == "__main__":
    fire.Fire(convert)
| 116 | 0 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : str =logging.get_logger(__name__)
def UpperCamelCase ( _lowerCamelCase : str ):
A__ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
A__ = MaskFormerConfig(backbone_config=_lowerCamelCase )
A__ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
A__ = 8_47
A__ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
A__ = 1_50
A__ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
A__ = 1_71
A__ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
A__ = 1_33
A__ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
A__ = 19
A__ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
A__ = 65
A__ = "mapillary-vistas-id2label.json"
A__ = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
    A__ = {int(k): v for k, v in idalabel.items()}
return config
def UpperCamelCase ( _lowerCamelCase : Dict ):
A__ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def UpperCamelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : Any ):
A__ = dct.pop(_lowerCamelCase )
A__ = val
def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A__ = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
A__ = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[:dim, :]
A__ = in_proj_bias[: dim]
A__ = in_proj_weight[
dim : dim * 2, :
]
A__ = in_proj_bias[
dim : dim * 2
]
A__ = in_proj_weight[
-dim :, :
]
A__ = in_proj_bias[-dim :]
# fmt: on
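# Added note: the slicing above follows the fused-QKV convention. A sketch with
# illustrative names — for a fused projection w of shape (3 * dim, dim):
#   q_w, k_w, v_w = w[:dim, :], w[dim : dim * 2, :], w[-dim:, :]
# i.e. rows are ordered query, key, value, and the bias splits the same way.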
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Any ):
# fmt: off
A__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
A__ = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
A__ = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: hidden_size, :]
A__ = in_proj_bias[:config.hidden_size]
A__ = in_proj_weight[hidden_size : hidden_size * 2, :]
A__ = in_proj_bias[hidden_size : hidden_size * 2]
A__ = in_proj_weight[-hidden_size :, :]
A__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
A__ = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
A__ = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: hidden_size, :]
A__ = in_proj_bias[:config.hidden_size]
A__ = in_proj_weight[hidden_size : hidden_size * 2, :]
A__ = in_proj_bias[hidden_size : hidden_size * 2]
A__ = in_proj_weight[-hidden_size :, :]
A__ = in_proj_bias[-hidden_size :]
# fmt: on
def UpperCamelCase ( ):
A__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : bool = False ):
A__ = get_maskformer_config(_lowerCamelCase )
# load original state_dict
with open(_lowerCamelCase , "rb" ) as f:
A__ = pickle.load(_lowerCamelCase )
A__ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
A__ = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_swin_q_k_v(_lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCamelCase , _lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
A__ = torch.from_numpy(_lowerCamelCase )
# load 🤗 model
A__ = MaskFormerForInstanceSegmentation(_lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCamelCase , param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys ) == 0, F"Unexpected keys: {unexpected_keys}"
# verify results
A__ = prepare_img()
if "vistas" in model_name:
A__ = 65
elif "cityscapes" in model_name:
A__ = 6_55_35
else:
A__ = 2_55
A__ = True if "ade" in model_name else False
A__ = MaskFormerImageProcessor(ignore_index=_lowerCamelCase , reduce_labels=_lowerCamelCase )
A__ = image_processor(_lowerCamelCase , return_tensors="pt" )
A__ = model(**_lowerCamelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
A__ = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F"nielsr/{model_name}" )
image_processor.push_to_hub(F"nielsr/{model_name}" )
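# Usage sketch (added; file name and paths are illustrative):
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path /path/to/output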
if __name__ == "__main__":
__lowerCAmelCase : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowerCAmelCase : str =parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 237 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def UpperCamelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=False ):
A__ = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def UpperCamelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : List[Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
A__ = ""
else:
A__ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
A__ = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
def UpperCamelCase ( _lowerCamelCase : int ):
A__ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] ):
A__ = dct.pop(_lowerCamelCase )
A__ = val
def UpperCamelCase ( ):
A__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any]=False ):
A__ = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=_lowerCamelCase , )
A__ = ViTHybridConfig(backbone_config=_lowerCamelCase , image_size=3_84 , num_labels=10_00 )
A__ = False
# load original model from timm
A__ = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
A__ = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = "huggingface/label-files"
A__ = "imagenet-1k-id2label.json"
A__ = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
    A__ = {int(k): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A__ = ViTHybridModel(_lowerCamelCase ).eval()
else:
A__ = ViTHybridForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# create image processor
A__ = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) )
A__ = transform.transforms
A__ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
A__ = ViTHybridImageProcessor(
do_resize=_lowerCamelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A__ = prepare_img()
A__ = transform(_lowerCamelCase ).unsqueeze(0 )
A__ = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
# verify logits
with torch.no_grad():
A__ = model(_lowerCamelCase )
A__ = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
A__ = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
A__ = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(F"ybelkada/{vit_name}" )
processor.push_to_hub(F"ybelkada/{vit_name}" )
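# Usage sketch (added; file name is illustrative):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 --pytorch_dump_folder_path /path/to/output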
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
__lowerCAmelCase : Optional[int] =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 237 | 1 |
'''simple docstring'''
from __future__ import annotations
class Node:
    '''A binary tree node.'''

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # node wiring reconstructed; the obfuscated source only preserved the creation order
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("""Tree is: """)
    display(tree)
if __name__ == "__main__":
main()
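
# Note (added): a *full* binary tree requires every node to have either zero or
# two children, which is exactly the two-branch case analysis in
# is_full_binary_tree above.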
| 352 |
'''simple docstring'''
class CircularQueue:
    '''Circular FIFO queue with a fixed capacity of n elements.'''

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("""QUEUE IS FULL""")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("""UNDERFLOW""")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
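
# Note (added): the (index + 1) % self.n arithmetic above implements a fixed-size
# ring buffer: front and rear wrap around the array instead of shifting elements,
# and the separate size counter distinguishes a full queue from an empty one.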
| 184 | 0 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''Calculate the date of Easter for a given year using Gauss's algorithm.'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
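
# Note (added): the two early returns above are Gauss's exception rules — when
# the raw computation would land on April 26 (days_to_add == 29) or April 25
# (days_to_add == 28) with the full moon falling on a Saturday, Easter is
# pulled back to April 19 and April 18 respectively.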
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(F'Easter in {year} {tense} {gauss_easter(year)}')
| 101 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
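    # Usage sketch (added; paths are illustrative):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert_model.ckpt \
    #       --albert_config_file ./albert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin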
| 180 | 0 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__) , data_file) ) ):
        a, x = list(map(int , line.split(""",""") ) )
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
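# Why log10 (added note): log10(a**x) == x * log10(a) and log10 is monotonic,
# so comparing x * log10(a) ranks the exponentials without ever computing the
# astronomically large values a**x.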
| 365 |
from __future__ import annotations
def lowerCamelCase__ ( a__ : list[list[int]] ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(a__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(a__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
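
# Quick check (added; not part of the original file): for the grid
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest right/down path is
# 1 -> 3 -> 1 -> 1 -> 1, so the function returns 7. Note that the input
# matrix is updated in place.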
if __name__ == "__main__":
import doctest
doctest.testmod()
| 261 | 0 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def a__ ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__lowerCAmelCase: Optional[int] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
__lowerCAmelCase: Optional[int] = MaskFormerConfig(backbone_config=__lowerCAmelCase )
__lowerCAmelCase: Union[str, Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
__lowerCAmelCase: Any = 8_4_7
__lowerCAmelCase: Optional[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
__lowerCAmelCase: Optional[int] = 1_5_0
__lowerCAmelCase: Dict = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
__lowerCAmelCase: str = 1_7_1
__lowerCAmelCase: Tuple = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
__lowerCAmelCase: Optional[int] = 1_3_3
__lowerCAmelCase: Any = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
__lowerCAmelCase: Optional[Any] = 1_9
__lowerCAmelCase: str = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
__lowerCAmelCase: Tuple = 6_5
__lowerCAmelCase: Any = '''mapillary-vistas-id2label.json'''
__lowerCAmelCase: Tuple = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) )
    __lowerCAmelCase: int = {int(k): v for k, v in idalabel.items()}
return config
def a__ ( __SCREAMING_SNAKE_CASE ) -> List[str]:
__lowerCAmelCase: Optional[int] = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__lowerCAmelCase: Dict = dct.pop(__lowerCAmelCase )
__lowerCAmelCase: Optional[Any] = val
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
__lowerCAmelCase: Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowerCAmelCase: List[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowerCAmelCase: str = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
__lowerCAmelCase: Dict = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase: Tuple = in_proj_weight[:dim, :]
__lowerCAmelCase: List[str] = in_proj_bias[: dim]
__lowerCAmelCase: List[Any] = in_proj_weight[
dim : dim * 2, :
]
__lowerCAmelCase: str = in_proj_bias[
dim : dim * 2
]
__lowerCAmelCase: Tuple = in_proj_weight[
-dim :, :
]
__lowerCAmelCase: Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
# fmt: off
__lowerCAmelCase: Tuple = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowerCAmelCase: List[str] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
__lowerCAmelCase: Dict = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase: Optional[Any] = in_proj_weight[: hidden_size, :]
__lowerCAmelCase: int = in_proj_bias[:config.hidden_size]
__lowerCAmelCase: int = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowerCAmelCase: str = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase: Optional[int] = in_proj_weight[-hidden_size :, :]
__lowerCAmelCase: Optional[int] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowerCAmelCase: Optional[int] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
__lowerCAmelCase: List[str] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase: Optional[Any] = in_proj_weight[: hidden_size, :]
__lowerCAmelCase: Dict = in_proj_bias[:config.hidden_size]
__lowerCAmelCase: Optional[int] = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowerCAmelCase: Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase: Union[str, Any] = in_proj_weight[-hidden_size :, :]
__lowerCAmelCase: Any = in_proj_bias[-hidden_size :]
# fmt: on
def a__ ( ) -> torch.Tensor:
__lowerCAmelCase: str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCAmelCase: Tuple = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = "ade" in model_name
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
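    # Example invocation (a sketch; the script filename and local paths are illustrative):
    #   python convert_maskformer_checkpoint.py \
    #       --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
    #       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade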
| 217 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class __lowerCAmelCase(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
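
# A minimal usage sketch for the image processor above (the class name is kept
# exactly as it appears in this file; return_tensors="pt" assumes torch is installed).
# With the defaults, an image is resized so its shortest edge is 256 and then
# center-cropped to 224x224.
if __name__ == "__main__":
    processor = __lowerCAmelCase()
    dummy_image = np.zeros((300, 400, 3), dtype=np.uint8)
    batch = processor(dummy_image, return_tensors="pt")
    print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])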
| 156 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
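
    # Example invocation (a sketch; the script filename and paths are illustrative):
    #   python token_counts.py \
    #       --data_file data/dump.bert-base-uncased.pickle \
    #       --token_counts_dump data/token_counts.bert-base-uncased.pickle \
    #       --vocab_size 30522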
| 359 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
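
    # Example (sketch): with "pots", "stop" and "tops" present in words.txt,
    #   signature("stop") -> "opst"
    #   anagram("stop")   -> ['pots', 'stop', 'tops']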
| 183 | 0 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """simple docstring"""
    # Build an output projection whose weight is tied to the embedding matrix.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    """simple docstring"""
    # NOTE: the default download root is an assumption; pass an explicit cache directory if needed.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        import io  # local import: the downloaded raw bytes need a file-like wrapper for torch.load

        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
a : Any = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
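
    # Example invocation (a sketch; the script filename is illustrative and the
    # checkpoint name resolves through the _MODELS table above):
    #   python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en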
| 105 |
def __UpperCamelCase(txt: str) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 116 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False  # assumed: the dump only preserves the False value of this module-level setting
@skip_mps
class __snake_case(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE: attribute names below follow the shared pipeline-test mixins.
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
    def setUpClass(cls):
        """simple docstring"""
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(_snake_case)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        """simple docstring"""
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        """simple docstring"""
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        """simple docstring"""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        """simple docstring"""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        """simple docstring"""
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        """simple docstring"""
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    # NOTE: the integration-test class and method names are assumed; the dump left
    # both test classes in this file with the same placeholder name.
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        """simple docstring"""
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator,
            num_inference_steps=5, max_iter_to_alter=5, output_type="numpy").images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 7 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size)

    def create_and_check_model(self, config, pixel_values):
        """simple docstring"""
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))

    def create_and_check_for_image_classification(self, config, pixel_values):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    # NOTE: the three flag names below are assumed; the dump only preserves their False values.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        """simple docstring"""
        pass

    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    # NOTE: the integration-test class and method names are assumed.
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 7 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    # NOTE: target attribute names are assumed from the upstream s3prl conversion script.
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    # NOTE: left-hand attribute names are assumed from the upstream s3prl conversion script.
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    # NOTE: the two boolean flags are assumed from the upstream conversion script.
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
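
    # Example invocation (a sketch; names and paths are illustrative):
    #   python convert_s3prl_checkpoint.py \
    #       --base_model_name facebook/wav2vec2-base \
    #       --config_path ./classifier_config.json \
    #       --checkpoint_path ./s3prl_downstream.ckpt \
    #       --model_dump_path ./converted-model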
| 64 |
class _lowercase:
    """simple docstring"""

    def __init__(self, n: int):
        """simple docstring"""
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        """simple docstring"""
        return self.size

    def is_empty(self):
        """simple docstring"""
        return self.size == 0

    def first(self):
        """simple docstring"""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """simple docstring"""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """simple docstring"""
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
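
# A minimal usage sketch for the circular queue above (class name kept as in this file):
if __name__ == "__main__":
    queue = _lowercase(3)
    queue.enqueue(1).enqueue(2)
    print(queue.first())    # 1
    print(queue.dequeue())  # 1
    print(len(queue))       # 1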
| 184 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
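
# A minimal usage sketch: align the template above with a concrete label set.
if __name__ == "__main__":
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    task = ImageClassification().align_with_features(features)
    print(task.label_schema)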
| 360 |
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
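
    # Example (sketch): Newton's method converges quadratically, e.g.
    #   square_root_iterative(5.0) -> 2.23606797749979  (== math.sqrt(5.0))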
| 47 | 0 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """simple docstring"""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 30 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    # NOTE: class and method names follow the upstream diffusers image normalizer.
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
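
# A minimal usage sketch: scale() then unscale() round-trips an embedding.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    embeds = torch.ones(2, 4)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds)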
| 261 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        return model
    def test_inference(self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    # NOTE: the integration-test class and method names are assumed; the dump left
    # both test classes in this file with the same placeholder name.
    '''simple docstring'''

    def test_score_sde_ve_pipeline(self):
        '''simple docstring'''
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 360 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 334 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
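
# A minimal usage sketch for the config above: attribute_map lets the standard
# Transformers names resolve to DistilBERT's own parameter names.
if __name__ == "__main__":
    config = DistilBertConfig(n_layers=3, n_heads=6, dim=384, hidden_dim=4 * 384)
    print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)  # 3 6 384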
| 27 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class a(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f'$A {self.special_tokens["eos"]["token"]}',
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
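
# A minimal usage sketch: train the Unigram tokenizer above on an in-memory corpus
# (the class name `a` is kept as it appears in this file).
if __name__ == "__main__":
    tok = a()
    tok.train_from_iterator(["hello world", "hello tokenizer"], vocab_size=40)
    print(tok.encode("hello world").tokens)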
| 183 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
    MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
    MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 364 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class UpperCamelCase_(PreTrainedModel):
    """simple docstring"""

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, images, clip_input, p_threshold=0.5, w_threshold=0.5):
        # NOTE: parameter names and order are assumed from the upstream safety checker.
        image_embeds = self.vision_model(clip_input)[0]
        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
            for idx, nsfw_detected_ in enumerate(nsfw_detected):
                if nsfw_detected_:
                    images[idx] = np.zeros(images[idx].shape)
        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
            for idx, watermark_detected_ in enumerate(watermark_detected):
                if watermark_detected_:
                    images[idx] = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 195 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class A ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass( cls )-> None:
        '''simple docstring'''
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls )-> None:
        '''simple docstring'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def get_dummy_components( self )-> dict:
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4),layers_per_block=1,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=3_2,attention_head_dim=(2, 4),use_linear_projection=True,)
        scheduler = DDIMScheduler(
            beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=False,set_alpha_to_one=False,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 )-> dict:
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def test_inference( self )-> None:
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape,(1, 6_4, 6_4, 3) )
        expected_slice = np.array(
            [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff,1E-3 )
    def test_cpu_offload_forward_pass( self )-> None:
        '''simple docstring'''
        super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
    def test_inference_batch_consistent( self )-> None:
        '''simple docstring'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self )-> None:
        '''simple docstring'''
        self._test_inference_batch_single_identical(batch_size=2,expected_max_diff=7E-4 )
    def test_dict_tuple_outputs_equivalent( self )-> None:
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def test_pt_np_pil_outputs_equivalent( self )-> None:
        '''simple docstring'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
    def test_save_load_local( self )-> None:
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=5E-4 )
    def test_save_load_optional_components( self )-> None:
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=4E-4 )
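# Hypothetical invocation for the fast tests above (the path and test id are
# assumptions following diffusers' usual test layout, not stated in this file):
#   pytest tests/pipelines/stable_diffusion/test_stable_diffusion_attend_and_excite.py -k test_inference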
@require_torch_gpu
@slow
class B ( unittest.TestCase ):
    """simple docstring"""
    @classmethod
    def setUpClass( cls )-> None:
        '''simple docstring'''
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls )-> None:
        '''simple docstring'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown( self )-> None:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16( self )-> None:
        '''simple docstring'''
        generator = torch.manual_seed(5_1 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',safety_checker=None,torch_dtype=torch.float16 )
        pipe.to('cuda' )
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,token_indices=token_indices,guidance_scale=7.5,generator=generator,num_inference_steps=5,max_iter_to_alter=5,output_type='numpy',).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 7 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args( unknown_args ) -> dict:
    '''simple docstring'''
    return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
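# For illustration: alternating flag/value tokens become a plain dict
# (flag names here are hypothetical), e.g.
#   parse_unknown_args(['--num_proc', '4', '--cache_dir', '/tmp/ds'])
#   -> {'num_proc': '4', 'cache_dir': '/tmp/ds'}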
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
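# Hypothetical shell usage once this module backs the `datasets-cli` entry point
# (command names follow the subcommands registered above):
#   datasets-cli env
#   datasets-cli test ./my_dataset --save_infos --all_configs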
| 7 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__( self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode( self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode( self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names( self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
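# A minimal usage sketch for the processor above, assuming it is constructed
# directly from its components (the checkpoint names below are illustrative,
# not confirmed model ids):
#
#   processor = UpperCAmelCase_(
#       image_processor=CLIPImageProcessor(),
#       tokenizer=XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base"))
#   batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#   # batch carries input_ids/attention_mask plus pixel_values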
| 370 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser : ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()
    @abstractmethod
    def run( self):
        '''simple docstring'''
        raise NotImplementedError()
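# A minimal sketch of a concrete subcommand, assuming the obvious contract
# (register an argparse subparser, then `run()` the chosen command);
# `hello` and its wiring are hypothetical, not part of the real CLI:
#
#   class HelloCommand(UpperCAmelCase_):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("hello")
#           sub.set_defaults(func=lambda args, **kw: HelloCommand())
#       def run(self):
#           print("hello from datasets-cli")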
| 300 | 0 |