Dataset schema: code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1)
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : int ):
pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a (ksize x ksize) Gabor kernel for the given orientation and wavelength."""
    # the kernel size has to be odd so the kernel has a center pixel
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
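
# Example: gabor_filter_kernel(10, 8, 45, 10, 0, 0) returns an 11x11 kernel oriented
# at 45 degrees (even kernel sizes are bumped to the next odd number above).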
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__A : List[Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
__A : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__A : int = np.zeros(gray.shape[:2])
for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
__A : Dict = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__A : str = out / out.max() * 2_5_5
__A : Dict = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Binary search for the index of the first negative number in a decreasing row."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
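
# e.g. find_negative_index([4, 2, 0, -1, -3]) == 3: the first negative value sits at index 3.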
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    # Each row is scanned only up to the bound found in the previous (larger) row.
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()


from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
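
# This mapping feeds `_LazyModule` at the bottom of the file: submodules are only
# imported when one of their names is first accessed, keeping `import` time low.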
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
'''simple docstring'''
lowerCamelCase_ = PLBartTokenizer(UpperCamelCase__ , language_codes='''base''' , keep_accents=UpperCamelCase__ )
lowerCamelCase_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowerCamelCase_ = tokenizer.vocab_size
lowerCamelCase_ = [tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) for x in range(end - 4 , UpperCamelCase__ )]
self.assertListEqual(UpperCamelCase__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
lowerCamelCase_ = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowerCamelCase_ = tokenizer(UpperCamelCase__ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ ) , UpperCamelCase__ , )
    def test_full_multi_tokenizer(self):
'''simple docstring'''
lowerCamelCase_ = PLBartTokenizer(UpperCamelCase__ , language_codes='''multi''' , keep_accents=UpperCamelCase__ )
lowerCamelCase_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowerCamelCase_ = tokenizer.vocab_size
lowerCamelCase_ = [tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) for x in range(end - 7 , UpperCamelCase__ )]
self.assertListEqual(
UpperCamelCase__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
lowerCamelCase_ = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowerCamelCase_ = tokenizer(UpperCamelCase__ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ ) , UpperCamelCase__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
    def test_python_en_tokenizer_batch_encode_plus(self):
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
    def test_tokenizer_decode_ignores_language_codes(self):
'''simple docstring'''
self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
lowerCamelCase_ = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ )
    def test_tokenizer_truncation(self):
'''simple docstring'''
lowerCamelCase_ = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , UpperCamelCase__ )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
    def test_mask_token(self):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] )
    def test_special_tokens_unaffected_by_save_load(self):
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ = PLBartTokenizer.from_pretrained(UpperCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase__ )
@require_torch
    def test_batch_fairseq_parity(self):
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors='''pt''' )
lowerCamelCase_ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , UpperCamelCase__ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
    def test_python_en_tokenizer_prepare_batch(self):
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowerCamelCase_ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def test_seq2seq_max_length(self):
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=3 , return_tensors='''pt''' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10 , return_tensors='''pt''' )
lowerCamelCase_ = targets['''input_ids''']
lowerCamelCase_ = shift_tokens_right(UpperCamelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation(self):
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
"""simple docstring"""
import argparse
import os
import re
__lowercase : Optional[int] = """src/diffusers"""
# Pattern that looks at the indentation in a line.
__lowercase : Dict = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
__lowercase : int = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowercase : Optional[Any] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
__lowercase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowercase : Any = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    """Return the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def lowerCamelCase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str]="" , _lowerCamelCase : Dict=None , _lowerCamelCase : int=None ):
lowerCamelCase_ = 0
lowerCamelCase_ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
lowerCamelCase_ = ['''\n'''.join(lines[:index] )]
else:
lowerCamelCase_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase_ = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
lowerCamelCase_ = [lines[index + 1]]
index += 1
else:
lowerCamelCase_ = []
else:
blocks.append('''\n'''.join(_lowerCamelCase ) )
lowerCamelCase_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append('''\n'''.join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
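
# Each returned block is one top-level statement at `indent_level` together with its
# more-indented continuation lines; `sort_imports` below reorders whole blocks at once.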
def ignore_underscore(key):
    """Wraps a key function so it sorts case-insensitively and ignores underscores."""
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
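
# e.g. sort_objects(["b_fn", "AClass", "A_CONST"]) -> ["A_CONST", "AClass", "b_fn"]:
# constants first, then classes, then functions, each group sorted ignoring underscores.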
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
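
# e.g. input_ids [[5, 6, 7]] with decoder_start_token_id=0 and pad_token_id=1 becomes
# [[0, 5, 6]]; any -100 positions (ignored-label markers) are replaced by the pad id.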
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
from ..utils import DummyObject, requires_backends
# Dummy placeholder raised when the optional `note_seq` dependency is missing.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
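
# Note: every per-stage hyperparameter above is a 3-element list because the default
# configuration (CvT-13) has three stages; stage i reads the i-th entry of each list.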
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
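
# Derivation: with softmax probabilities p_i = exp(x_i) / A, the entropy
# -sum(p_i * log(p_i)) expands to log(A) - sum(x_i * exp(x_i)) / A = log(A) - B / A.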
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
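# The early exit above is plain control flow: once a highway head is confident
# enough (its entropy drops below the per-layer threshold), the encoder aborts
# the layer loop by raising HighwayException, and the classification head
# catches it.  A minimal sketch of the same pattern (hypothetical names, for
# illustration only):
#
#     class EarlyExit(Exception):
#         def __init__(self, payload, layer):
#             self.payload = payload
#             self.layer = layer
#
#     def run_layers(layers, x, thresholds):
#         for i, layer in enumerate(layers):
#             x = layer(x)
#             if uncertainty(x) < thresholds[i]:
#                 raise EarlyExit(x, i + 1)
#         return x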
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to
    (cross-entropy computation in BertForSequenceClassification)."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
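# Usage sketch (hypothetical checkpoint and tensors; DeeBERT loads standard
# BERT weights and adds the highway heads on top, so the heads are randomly
# initialized until fine-tuned):
#
#     model = DeeBertForSequenceClassification.from_pretrained("bert-base-uncased")
#     model.eval()
#     model.bert.encoder.set_early_exit_entropy(0.4)  # exit when entropy < 0.4
#     outputs = model(input_ids, attention_mask=attention_mask)
#     logits = outputs[0]
#     exit_layer = outputs[-1]  # layer at which the model exited (1-based)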
| 286 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 2 | '''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
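# Endpoint check for the schedule above: at t=0, sigma=0 and alpha=1, so
# atan2(0, 1) = 0 and the schedule returns 0; at t=1, sigma=1 and alpha=0, so
# atan2(1, 0) = pi/2 and the schedule returns 1.  In between, the mapping is
# nonlinear, which is the point of the "crash" schedule.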
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
lowercase__ : Any = parser.parse_args()
main(args)
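# Example invocation (a sketch; the script file name is whatever this file is
# saved as, and the model name must be one of the MODELS_MAP keys above):
#
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k \
#         --checkpoint_path ./gwf-440k-diffusers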
| 390 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
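# Usage sketch (the checkpoint name is illustrative; any hub repo that ships a
# preprocessor_config.json would do):
#
#     config_dict = get_image_processor_config("google/vit-base-patch16-224")
#     # -> the parsed preprocessor_config.json, or {} if none could be located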
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
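# Typical usage; "google/vit-base-patch16-224" resolves to ViTImageProcessor
# through the mapping above:
#
#     from transformers import AutoImageProcessor
#
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     inputs = image_processor(images=image, return_tensors="pt")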
| 719 |
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Brick sort: alternately bubble over even- and odd-indexed pairs until sorted."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
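# Example (brick sort is O(n^2) in the worst case, but each even/odd pass
# touches disjoint pairs, which is why it parallelizes well):
#
#     >>> odd_even_sort([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]
#     >>> odd_even_sort([])
#     []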
if __name__ == "__main__":
print('''Enter list to be sorted''')
lowerCAmelCase_ = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCAmelCase_ = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 494 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be summed before a
    single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
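# Worked examples:
#
#     >>> multiplicative_persistence(39)   # 39 -> 27 -> 14 -> 4
#     3
#     >>> additive_persistence(199)        # 199 -> 19 -> 10 -> 1
#     3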
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197 | import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 197 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
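# The lazy module defers importing the heavy torch-backed submodules until one
# of their symbols is first accessed, e.g. (illustrative):
#
#     from transformers.models.gpt_bigcode import GPTBigCodeConfig
#     # only at this point is configuration_gpt_bigcode actually imported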
| 283 | """simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below ``limit``."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """
    Project Euler 50: find the prime below ``ceiling`` that can be written as
    the sum of the most consecutive primes.
    """
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
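# Quick checks (953 is the example value given in the Project Euler 50 problem
# statement for a ceiling of one thousand):
#
#     >>> prime_sieve(10)
#     [2, 3, 5, 7]
#     >>> solution(1_000)
#     953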
if __name__ == "__main__":
print(f"""{solution() = }""")
| 283 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 2 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Set the seed of all the random number generators for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class __magic_name__ :
def __init__( self : Optional[Any] , snake_case__ : Iterable[torch.nn.Parameter] , snake_case__ : float = 0.99_99 , snake_case__ : float = 0.0 , snake_case__ : int = 0 , snake_case__ : bool = False , snake_case__ : Union[float, int] = 1.0 , snake_case__ : Union[float, int] = 2 / 3 , snake_case__ : Optional[Any] = None , snake_case__ : Dict[str, Any] = None , **snake_case__ : Tuple , ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :int = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowercase :Optional[Any] = True
if kwargs.get('''max_value''' , snake_case__ ) is not None:
lowercase :Optional[Any] = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :Optional[int] = kwargs['''max_value''']
if kwargs.get('''min_value''' , snake_case__ ) is not None:
lowercase :List[Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :str = kwargs['''min_value''']
lowercase :Any = list(snake_case__ )
lowercase :Optional[Any] = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , snake_case__ ) is not None:
lowercase :str = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
self.to(device=kwargs['''device'''] )
lowercase :int = None
lowercase :int = decay
lowercase :Union[str, Any] = min_decay
lowercase :List[Any] = update_after_step
lowercase :Union[str, Any] = use_ema_warmup
lowercase :Any = inv_gamma
lowercase :Any = power
lowercase :str = 0
lowercase :int = None # set in `step()`
lowercase :List[str] = model_cls
lowercase :Any = model_config
@classmethod
def __snake_case ( cls : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
'''simple docstring'''
lowercase , lowercase :int = model_cls.load_config(snake_case__ , return_unused_kwargs=snake_case__ )
lowercase :List[Any] = model_cls.from_pretrained(snake_case__ )
lowercase :Optional[int] = cls(model.parameters() , model_cls=snake_case__ , model_config=model.config )
ema_model.load_state_dict(snake_case__ )
return ema_model
def __snake_case ( self : int , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
lowercase :Dict = self.model_cls.from_config(self.model_config )
lowercase :Tuple = self.state_dict()
state_dict.pop('''shadow_params''' , snake_case__ )
model.register_to_config(**snake_case__ )
self.copy_to(model.parameters() )
model.save_pretrained(snake_case__ )
def __snake_case ( self : int , snake_case__ : int ):
'''simple docstring'''
lowercase :Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowercase :int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowercase :Dict = (1 + step) / (1_0 + step)
lowercase :Optional[int] = min(snake_case__ , self.decay )
# make sure decay is not smaller than min_decay
lowercase :Optional[int] = max(snake_case__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def __snake_case ( self : Any , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :Tuple = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Union[str, Any] = parameters.parameters()
lowercase :Optional[Any] = list(snake_case__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase :List[Any] = self.get_decay(self.optimization_step )
lowercase :Optional[Any] = decay
lowercase :List[Any] = 1 - decay
lowercase :List[str] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , snake_case__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
lowercase :Union[str, Any] = deepspeed.zero.GatheredParameters(snake_case__ , modifier_rank=snake_case__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(snake_case__ )
def __snake_case ( self : str , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :Optional[Any] = list(snake_case__ )
for s_param, param in zip(self.shadow_params , snake_case__ ):
param.data.copy_(s_param.to(param.device ).data )
def __snake_case ( self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Dict=None ):
'''simple docstring'''
lowercase :str = [
p.to(device=snake_case__ , dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ )
for p in self.shadow_params
]
def __snake_case ( self : Dict ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __snake_case ( self : Optional[int] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :str = [param.detach().cpu().clone() for param in parameters]
def __snake_case ( self : List[Any] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , snake_case__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowercase :Dict = None
def __snake_case ( self : Union[str, Any] , snake_case__ : dict ):
'''simple docstring'''
lowercase :List[str] = copy.deepcopy(snake_case__ )
lowercase :Any = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
lowercase :int = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , snake_case__ ):
raise ValueError('''Invalid min_decay''' )
lowercase :List[Any] = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , snake_case__ ):
raise ValueError('''Invalid optimization_step''' )
lowercase :int = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , snake_case__ ):
raise ValueError('''Invalid update_after_step''' )
lowercase :Optional[int] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , snake_case__ ):
raise ValueError('''Invalid use_ema_warmup''' )
lowercase :Any = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
lowercase :Dict = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
lowercase :Optional[int] = state_dict.get('''shadow_params''' , snake_case__ )
if shadow_params is not None:
lowercase :List[Any] = shadow_params
if not isinstance(self.shadow_params , snake_case__ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(snake_case__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
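# A minimal, standalone sketch of the decay schedule that the `get_decay`
# method above implements for the EMA weights. The default constants here
# (inv_gamma=1.0, power=2/3, min_decay=0.0, max_decay=0.9999) are illustrative
# assumptions, not values read from this file.
def ema_decay(optimization_step, update_after_step=0, use_ema_warmup=True,
              inv_gamma=1.0, power=2.0 / 3.0, min_decay=0.0, max_decay=0.9999):
    step = max(0, optimization_step - update_after_step - 1)
    if step <= 0:
        return 0.0  # no averaging before the first effective step
    if use_ema_warmup:
        value = 1 - (1 + step / inv_gamma) ** -power
    else:
        value = (1 + step) / (10 + step)
    return max(min(value, max_decay), min_decay)  # clamp into [min_decay, max_decay]

assert ema_decay(0) == 0.0
assert 0.0 < ema_decay(100) < ema_decay(10_000) < 1.0  # decay ramps toward max_decay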
| 677 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Optional[int]=3 , __lowercase : Any=32 , __lowercase : List[str]=3 , __lowercase : Tuple=10 , __lowercase : Dict=[8, 16, 32, 64] , __lowercase : int=[1, 1, 2, 1] , __lowercase : Any=True , __lowercase : Dict=True , __lowercase : Optional[int]="relu" , __lowercase : Any=3 , __lowercase : int=None , __lowercase : Tuple=["stage2", "stage3", "stage4"] , __lowercase : List[str]=[2, 3, 4] , __lowercase : Tuple=1 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = embeddings_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = len(__lowercase )
snake_case_ = out_features
snake_case_ = out_indices
snake_case_ = num_groups
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : str ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case__ ( self : Dict , __lowercase : Dict , __lowercase : List[Any] , __lowercase : int ):
"""simple docstring"""
snake_case_ = BitModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case__ ( self : Tuple , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : Tuple ):
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = BitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ):
"""simple docstring"""
snake_case_ = BitBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = BitBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCAmelCase_ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = BitModelTester(self )
snake_case_ = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions" )
def snake_case__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def snake_case__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
pass
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__lowercase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowercase )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(config=__lowercase )
for name, module in model.named_modules():
if isinstance(__lowercase , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(__lowercase : int , __lowercase : Optional[int] , __lowercase : Union[str, Any] ):
snake_case_ = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__lowercase , __lowercase ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case_ = layer_type
snake_case_ = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def snake_case__ ( self : str ):
"""simple docstring"""
pass
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def snake_case__ ( self : Dict ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BitModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self : List[str] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowercase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=__lowercase , return_tensors="pt" ).to(__lowercase )
# forward pass
with torch.no_grad():
snake_case_ = model(**__lowercase )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowercase )
snake_case_ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (BitBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = BitConfig
lowerCAmelCase_ = False
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = BitModelTester(self )
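# A minimal sketch of the forward pass these tests exercise: a randomly
# initialised BitForImageClassification on random pixel values. The config
# numbers below are illustrative, not the ones BitModelTester uses.
import torch
from transformers import BitConfig, BitForImageClassification

config = BitConfig(num_labels=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1])
model = BitForImageClassification(config).eval()
pixel_values = torch.randn(2, 3, 32, 32)  # (batch, channels, height, width)
with torch.no_grad():
    logits = model(pixel_values).logits
assert logits.shape == (2, 10)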
| 706 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( _A , _A=False ):
'''simple docstring'''
snake_case_ = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def lowerCamelCase__ ( _A , _A , _A=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = ""
else:
snake_case_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_A , _A )
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = dct.pop(_A )
snake_case_ = val
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( _A , _A , _A=False ):
'''simple docstring'''
snake_case_ = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=_A , )
snake_case_ = ViTHybridConfig(backbone_config=_A , image_size=384 , num_labels=1000 )
snake_case_ = False
# load original model from timm
snake_case_ = timm.create_model(_A , pretrained=_A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = timm_model.state_dict()
if base_model:
remove_classification_head_(_A )
snake_case_ = create_rename_keys(_A , _A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
read_in_q_k_v(_A , _A , _A )
snake_case_ = "huggingface/label-files"
snake_case_ = "imagenet-1k-id2label.json"
snake_case_ = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(_A ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ = ViTHybridModel(_A ).eval()
else:
snake_case_ = ViTHybridForImageClassification(_A ).eval()
model.load_state_dict(_A )
# create image processor
snake_case_ = create_transform(**resolve_data_config({} , model=_A ) )
snake_case_ = transform.transforms
snake_case_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
snake_case_ = ViTHybridImageProcessor(
do_resize=_A ,
size={"shortest_edge": timm_transforms[0].size} ,
resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
do_center_crop=_A ,
crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,
do_normalize=_A ,
image_mean=timm_transforms[-1].mean.tolist() ,
image_std=timm_transforms[-1].std.tolist() , )
snake_case_ = prepare_img()
snake_case_ = transform(_A ).unsqueeze(0 )
snake_case_ = processor(_A , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(_A , _A )
# verify logits
with torch.no_grad():
snake_case_ = model(_A )
snake_case_ = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
snake_case_ = timm_model.forward_features(_A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_A , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ = timm_model(_A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_A , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_A ).mkdir(exist_ok=_A )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_A )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
lowercase__ : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
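# The script above is, at its core, the standard checkpoint-conversion
# pattern: build (old_key, new_key) pairs, then move tensors across with
# pop/assign. A toy, self-contained sketch of that mechanism (the key names
# below are invented for illustration):
def rename_state_dict(state_dict, rename_pairs):
    for old, new in rename_pairs:
        if old in state_dict:
            state_dict[new] = state_dict.pop(old)
    return state_dict

toy_sd = {"patch_embed.proj.weight": 1, "head.bias": 2}
toy_pairs = [
    ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
    ("head.bias", "classifier.bias"),
]
assert rename_state_dict(toy_sd, toy_pairs) == {
    "vit.embeddings.patch_embeddings.projection.weight": 1,
    "classifier.bias": 2,
}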
| 139 | 0 |
'''simple docstring'''
def __a ( lowerCAmelCase__ : int ):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ : List[Any] = F'Input value of [number={number}] must be an integer'
raise TypeError(lowerCAmelCase__ )
if number < 1:
a__ : Any = F'Input value of [number={number}] must be > 0'
raise ValueError(lowerCAmelCase__ )
a__ : List[Any] = 1
for i in range(1 , lowerCAmelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
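# The loop above applies the Catalan recurrence C_i = C_{i-1} * (4i - 2) / (i + 1);
# with its 1-based input, passing n returns C_{n-1}. A quick cross-check against
# the closed form C_k = C(2k, k) / (k + 1):
from math import comb

def catalan(k):
    return comb(2 * k, k) // (k + 1)

assert [catalan(k) for k in range(6)] == [1, 1, 2, 5, 14, 42]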
| 688 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
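# What the fixture module above enables, in miniature: tests patch a
# module-level name, code importing through that module sees the patch, and
# the underlying object stays untouched. A self-contained sketch:
import os.path
import types

mod = types.ModuleType("fake_module")
mod.join = os.path.join                    # mirrors `from os.path import join` above
original = mod.join
mod.join = lambda *parts: "|".join(parts)  # the patch under test
assert mod.join("a", "b") == "a|b"
mod.join = original                        # restore, as a test fixture would
assert mod.join("a", "b") == os.path.join("a", "b")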
| 688 | 1 |
'''simple docstring'''
from __future__ import annotations
snake_case_ = list[tuple[int, int]]
snake_case_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
snake_case_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class a__ :
def __init__(self : List[Any], __UpperCAmelCase : int, __UpperCAmelCase : int, __UpperCAmelCase : int, __UpperCAmelCase : int, __UpperCAmelCase : float, __UpperCAmelCase : Node | None, ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = pos_x
SCREAMING_SNAKE_CASE : Optional[int] = pos_y
SCREAMING_SNAKE_CASE : Dict = (pos_y, pos_x)
SCREAMING_SNAKE_CASE : List[str] = goal_x
SCREAMING_SNAKE_CASE : List[Any] = goal_y
SCREAMING_SNAKE_CASE : str = g_cost
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : int = self.calculate_heuristic()
def lowercase__ (self : int ) -> float:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = abs(self.pos_x - self.goal_x )
SCREAMING_SNAKE_CASE : int = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__(self : List[str], __UpperCAmelCase : Any ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class a__ :
def __init__(self : Tuple, __UpperCAmelCase : tuple[int, int], __UpperCAmelCase : tuple[int, int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = Node(start[1], start[0], goal[1], goal[0], 0, __UpperCAmelCase )
SCREAMING_SNAKE_CASE : Dict = Node(goal[1], goal[0], goal[1], goal[0], 99999, __UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = [self.start]
SCREAMING_SNAKE_CASE : list[Node] = []
SCREAMING_SNAKE_CASE : int = False
def lowercase__ (self : Union[str, Any] ) -> Path | None:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
SCREAMING_SNAKE_CASE : int = True
return self.retrace_path(__UpperCAmelCase )
self.closed_nodes.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = self.get_successors(__UpperCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__UpperCAmelCase )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : Tuple = self.open_nodes.pop(self.open_nodes.index(__UpperCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__UpperCAmelCase )
else:
self.open_nodes.append(__UpperCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def lowercase__ (self : Tuple, __UpperCAmelCase : Node ) -> list[Node]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = []
for action in delta:
SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE : str = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__UpperCAmelCase, __UpperCAmelCase, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, __UpperCAmelCase, ) )
return successors
def lowercase__ (self : Optional[int], __UpperCAmelCase : Node | None ) -> Path:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = node
SCREAMING_SNAKE_CASE : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE : Any = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
snake_case_ = (0, 0)
snake_case_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
snake_case_ = GreedyBestFirst(init, goal)
snake_case_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
snake_case_ = 2
for elem in grid:
print(elem)
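# Design note: the search loop above re-sorts `open_nodes` before every pop,
# which costs O(n log n) per iteration. A binary heap gives the same
# "pop the lowest f_cost node" in O(log n), and the `__lt__` defined on the
# node class already provides the ordering a heap needs. A tiny stand-in
# demonstrating the swap:
import heapq

class _Item:  # same ordering contract as the node class above
    def __init__(self, f_cost):
        self.f_cost = f_cost
    def __lt__(self, other):
        return self.f_cost < other.f_cost

open_heap = [_Item(3), _Item(1), _Item(2)]
heapq.heapify(open_heap)                     # once, instead of sort() per iteration
assert heapq.heappop(open_heap).f_cost == 1  # heappush then replaces list.append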
| 717 |
'''simple docstring'''
def __lowercase (_SCREAMING_SNAKE_CASE :int ):
SCREAMING_SNAKE_CASE : Tuple = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def __lowercase (_SCREAMING_SNAKE_CASE :int ):
SCREAMING_SNAKE_CASE : List[Any] = 0
while number > 0:
SCREAMING_SNAKE_CASE : List[str] = number % 10
sum_of_digits += last_digit
SCREAMING_SNAKE_CASE : List[str] = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def __lowercase (_SCREAMING_SNAKE_CASE :int = 1_00 ):
SCREAMING_SNAKE_CASE : List[Any] = factorial(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = split_and_add(_SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
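# A compact cross-check of the same computation using the standard-library
# factorial; the expected values are easy to verify by hand and match the
# well-known Project Euler 20 result:
from math import factorial

def digit_sum_of_factorial(n):
    return sum(int(d) for d in str(factorial(n)))

assert digit_sum_of_factorial(10) == 27    # 10! = 3628800 -> 3+6+2+8+8
assert digit_sum_of_factorial(100) == 648  # Project Euler 20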
| 355 | 0 |
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return number | (1 << position)
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return number & ~(1 << position)
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return number ^ (1 << position)
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return ((number >> position) & 1) == 1
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
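# The five helpers above all ended up under a single obfuscated name, so as a
# usage example here they are again under conventional names, checked on
# n = 0b1010 (= 10):
def set_bit(number, position):
    return number | (1 << position)

def clear_bit(number, position):
    return number & ~(1 << position)

def flip_bit(number, position):
    return number ^ (1 << position)

def is_bit_set(number, position):
    return ((number >> position) & 1) == 1

def get_bit(number, position):
    return int((number & (1 << position)) != 0)

n = 0b1010
assert set_bit(n, 0) == 0b1011    # turn bit 0 on
assert clear_bit(n, 1) == 0b1000  # turn bit 1 off
assert flip_bit(n, 3) == 0b0010   # toggle bit 3
assert is_bit_set(n, 1) is True
assert get_bit(n, 0) == 0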
| 406 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
snake_case_ : Tuple = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
snake_case_ : Union[str, Any] = F'''https://www.google.com/search?q={query}&num=100'''
snake_case_ : Optional[int] = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
snake_case_ : int = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
snake_case_ : List[str] = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
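# The `except` branch above exists because Google's lightweight results page
# wraps targets in a redirect such as `/url?q=<target>&sa=...`. A minimal
# sketch of unwrapping that shape with parse_qs (the exact href layout varies,
# so treat this as one common case rather than a guarantee):
from urllib.parse import parse_qs, urlparse

href = "/url?q=https://example.com/page&sa=U&ved=abc"
target = parse_qs(urlparse(href).query)["q"][0]
assert target == "https://example.com/page"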
| 212 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : Dict = logging.get_logger(__name__)
a__ : Union[str, Any] = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[int] = "deformable_detr"
A : str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : List[Any] , lowerCAmelCase : Any=True , lowerCAmelCase : Tuple=None , lowerCAmelCase : Any=3 , lowerCAmelCase : Any=3_00 , lowerCAmelCase : Tuple=10_24 , lowerCAmelCase : str=6 , lowerCAmelCase : Tuple=10_24 , lowerCAmelCase : List[str]=8 , lowerCAmelCase : List[str]=6 , lowerCAmelCase : Any=10_24 , lowerCAmelCase : List[str]=8 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Any=True , lowerCAmelCase : int="relu" , lowerCAmelCase : List[Any]=2_56 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : List[str]=1.0 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int="sine" , lowerCAmelCase : int="resnet50" , lowerCAmelCase : Dict=True , lowerCAmelCase : Tuple=False , lowerCAmelCase : Dict=4 , lowerCAmelCase : Tuple=4 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Any=False , lowerCAmelCase : Tuple=3_00 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Union[str, Any]=1 , lowerCAmelCase : int=5 , lowerCAmelCase : str=2 , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict=1 , lowerCAmelCase : Optional[int]=5 , lowerCAmelCase : Any=2 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : int=0.25 , lowerCAmelCase : Optional[Any]=False , **lowerCAmelCase : str , ) -> str:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
lowercase__ = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = backbone_config.get('model_type')
lowercase__ = CONFIG_MAPPING[backbone_model_type]
lowercase__ = config_class.from_dict(lowerCAmelCase)
lowercase__ = use_timm_backbone
lowercase__ = backbone_config
lowercase__ = num_channels
lowercase__ = num_queries
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = init_xavier_std
lowercase__ = encoder_layerdrop
lowercase__ = auxiliary_loss
lowercase__ = position_embedding_type
lowercase__ = backbone
lowercase__ = use_pretrained_backbone
lowercase__ = dilation
# deformable attributes
lowercase__ = num_feature_levels
lowercase__ = encoder_n_points
lowercase__ = decoder_n_points
lowercase__ = two_stage
lowercase__ = two_stage_num_proposals
lowercase__ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
lowercase__ = class_cost
lowercase__ = bbox_cost
lowercase__ = giou_cost
# Loss coefficients
lowercase__ = mask_loss_coefficient
lowercase__ = dice_loss_coefficient
lowercase__ = bbox_loss_coefficient
lowercase__ = giou_loss_coefficient
lowercase__ = eos_coefficient
lowercase__ = focal_alpha
lowercase__ = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase)
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return self.d_model
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
lowercase__ = self.backbone_config.to_dict()
lowercase__ = self.__class__.model_type
return output
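# Instantiating the config above with a couple of overrides; the attribute_map
# makes the generic names resolve to the DETR-specific ones. A sketch assuming
# the class is exposed as transformers.DeformableDetrConfig:
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(d_model=128, encoder_attention_heads=4)
assert config.hidden_size == 128        # attribute_map -> d_model
assert config.num_attention_heads == 4  # attribute_map -> encoder_attention_heads
assert config.two_stage is False        # default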
| 703 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
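# The padding semantics these tests pin down, reduced to plain numpy:
# "longest" pads to the batch maximum, "max_length" to a fixed target, and
# pad_to_multiple_of rounds the target up. A minimal sketch, not the real
# feature-extractor code path:
import numpy as np

def pad_batch(seqs, target_len, padding_value=0.0):
    return np.stack([
        np.pad(np.asarray(s, dtype=np.float32), (0, target_len - len(s)),
               constant_values=padding_value)
        for s in seqs
    ])

batch = [[1.0, 2.0], [3.0, 4.0, 5.0]]
assert pad_batch(batch, max(map(len, batch))).shape == (2, 3)  # padding="longest"
multiple = 4
target = -(-max(map(len, batch)) // multiple) * multiple       # ceil to a multiple
assert pad_batch(batch, target).shape == (2, 4)                # pad_to_multiple_of=4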
| 642 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : List[Any] = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = ["""ConditionalDetrFeatureExtractor"""]
_lowerCAmelCase : int = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
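# The _import_structure / _LazyModule dance above defers heavy imports until
# an attribute is first touched. A much-simplified sketch of the mechanism
# (not the real transformers implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for module_name, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(module_name), attr)
                setattr(self, attr, value)  # cache so the import runs only once
                return value
        raise AttributeError(attr)

lazy = LazyModule("demo", {"json": ["dumps"]})  # json stands in for a heavy submodule
assert lazy.dumps({"a": 1}) == '{"a": 1}'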
| 438 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''mra'''
def __init__( self : Any , snake_case__ : List[str]=5_0_2_6_5 , snake_case__ : Any=7_6_8 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : str="gelu" , snake_case__ : Any=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : Union[str, Any]=1 , snake_case__ : List[Any]=0.02 , snake_case__ : str=1e-5 , snake_case__ : List[Any]="absolute" , snake_case__ : str=4 , snake_case__ : List[str]="full" , snake_case__ : Tuple=0 , snake_case__ : Any=0 , snake_case__ : Union[str, Any]=1 , snake_case__ : int=0 , snake_case__ : int=2 , **snake_case__ : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : str = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : List[str] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : Tuple = position_embedding_type
UpperCAmelCase__ : List[str] = block_per_row
UpperCAmelCase__ : Optional[Any] = approx_mode
UpperCAmelCase__ : Any = initial_prior_first_n_blocks
UpperCAmelCase__ : List[Any] = initial_prior_diagonal_n_blocks
| 438 | 1 |
from collections import defaultdict
from math import gcd
def a__ (__lowercase :int = 150_0000 ) -> int:
_A : defaultdict = defaultdict(__lowercase )
_A : Dict = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __lowercase , 2 ):
if gcd(__lowercase , __lowercase ) > 1:
continue
_A : List[Any] = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__lowercase , limit + 1 , __lowercase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'''{solution() = }''')
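# Background for the search above: Euclid's parametrisation says that for
# coprime m > n of opposite parity, (m^2 - n^2, 2mn, m^2 + n^2) is a primitive
# right triangle with perimeter 2m(m + n), which is exactly the quantity the
# loop bounds and steps by. A spot check on the smallest case:
from math import gcd

m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5)
assert a * a + b * b == c * c
assert a + b + c == 2 * m * (m + n)         # perimeter 12
assert gcd(m, n) == 1 and (m - n) % 2 == 1  # primitive-triple conditions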
| 332 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
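# With the lazy structure above, importing the package stays cheap and heavy
# backends load on first attribute access. A sketch (assumes a standard
# `transformers` checkout):
#
#   from transformers.models import vit   # no torch import happens here
#   model_cls = vit.ViTModel              # _LazyModule resolves it on access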
| 332 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
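# Standalone usage sketch mirroring the integration test above (`waveform` is
# an illustrative 1-D float array; the defaults give (1, 1, 192, 128) outputs):
#
#   feature_extractor = TvltFeatureExtractor()
#   audio_values = feature_extractor([waveform], sampling_rate=44100, return_tensors="np").audio_values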
| 27 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point(point: list[float]) -> None:
    """Raises a TypeError or ValueError if ``point`` is not a list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    One-liner version of manhattan_distance.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
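# The two implementations are interchangeable; a quick spot check
# (|1 - 2| + |1 - 2| = 2):
#
#   assert manhattan_distance([1, 1], [2, 2]) == manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0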
if __name__ == "__main__":
import doctest
doctest.testmod()
| 232 |
UpperCAmelCase_ : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    """Encrypts ``message`` with the Vigenere cipher using ``key``."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypts ``message`` with the Vigenere cipher using ``key``."""
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    """Shifts each letter of ``message`` by the matching key letter."""
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
| 232 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
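# The expected values above depend on seeding SpeechT5 with torch.manual_seed(0);
# the same call outside the test harness looks like:
#
#   tool = load_tool("text-to-speech")
#   audio_tensor = tool("hey").to_raw()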
| 663 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) | 45 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
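# Usage sketch matching the slow test above ("harmonai/maestro-150k" is the
# checkpoint used in the test; audio_length_in_s is in seconds):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]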
| 700 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Constructs patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with 1 group; input shape (batch_size, channels, height, width)."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)

        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
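# Minimal inference sketch (the "sail/poolformer_s12" checkpoint comes from the
# docstring constants above; `image` is an illustrative PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   logits = model(**processor(images=image, return_tensors="pt")).logits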
| 72 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
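# Usage sketch mirroring the integration tests above ("Intel/ldm3d" is the
# checkpoint used in the tests; the output carries paired rgb and depth arrays):
#
#   pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
#   output = pipe("a photograph of an astronaut riding a horse")
#   rgb, depth = output.rgb, output.depth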
| 98 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]] | 433 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def _lowerCAmelCase ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case_ : int = ["Summary of the text.", "Another summary."]
snake_case_ : Dict = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
snake_case_ : str = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = [[0] * len(_SCREAMING_SNAKE_CASE ) for x in encoded_output["input_ids"]]
snake_case_ : Optional[int] = tokenizer.pad(_SCREAMING_SNAKE_CASE )
self.assertSequenceEqual(outputs["global_attention_mask"] , _SCREAMING_SNAKE_CASE )
    def test_pretokenized_inputs(self):
        pass
def _lowerCAmelCase ( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : int = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = "A, <mask> AllenNLP sentence."
snake_case_ : int = tokenizer_r.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = tokenizer_p.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
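# Usage sketch outside the test suite: LED's tokenizer pads `global_attention_mask` with -1,
# which is exactly what test_global_attention_mask asserts above. Assumes network access to
# the public allenai/led-base-16384 checkpoint.
if __name__ == "__main__":
    from transformers import LEDTokenizerFast

    led_tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    enc = led_tokenizer(["Summary of the text.", "Another summary."])
    enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
    padded = led_tokenizer.pad(enc)
    print(padded["global_attention_mask"])  # the shorter sequence is right-padded with -1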
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a ViLT image processor and a BERT tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
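# Usage sketch: the processor bundles text tokenization and image preprocessing into a single
# call. Assumes network access to the public dandelin/vilt-b32-finetuned-vqa checkpoint.
#
#   import requests
#   from PIL import Image
#   from transformers import ViltForQuestionAnswering, ViltProcessor
#
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#
#   # one call returns input_ids/attention_mask from the tokenizer plus pixel_values/pixel_mask
#   inputs = processor(image, "How many cats are there?", return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])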
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive integer left by ``shift_amount`` bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive integer right, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement for negative numbers)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
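# Quick demonstration of the three shifts (the expected outputs follow directly from the code):
if __name__ == "__main__":
    print(logical_left_shift(0b1101, 2))   # 0b110100
    print(logical_right_shift(0b1101, 2))  # 0b11
    print(arithmetic_right_shift(-8, 2))   # 0b11110  (sign bit is replicated)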
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: Req = R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
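# Worked example: two 10-ohm resistors give 5 ohms in parallel and 20 ohms in series.
if __name__ == "__main__":
    print(resistor_parallel([10, 10]))  # 5.0
    print(resistor_series([10, 10]))    # 20.0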
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
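# Export sketch using the ONNX config above. This is a sketch, not a pinned recipe: the
# transformers.onnx export helper shown here is the API this config class was written for,
# but its exact location/CLI has shifted across library versions.
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration
#   from transformers.onnx import export
#
#   ckpt = "facebook/blenderbot_small-90M"
#   tokenizer = AutoTokenizer.from_pretrained(ckpt)
#   model = BlenderbotSmallForConditionalGeneration.from_pretrained(ckpt)
#   # "seq2seq-lm" wires up decoder inputs (and past_key_values when use_past=True)
#   onnx_config = BlenderbotSmallOnnxConfig(model.config, task="seq2seq-lm")
#   onnx_inputs, onnx_outputs = export(
#       tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("blenderbot_small.onnx")
#   )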
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
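# Example invocation (assumes this script is saved as convert_detr_to_pytorch.py and that
# torch.hub can reach facebookresearch/detr to download the original weights):
#
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50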
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
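# The scale/predict/step loop above is the same denoising pattern diffusers pipelines run
# internally. A condensed standalone sketch (illustrative only — `unet` stands in for any
# epsilon-predicting noise model):
#
#   scheduler = EulerDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(30)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample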
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
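# Note on the two-stage design exercised above: the prior pipeline maps the text prompt to
# CLIP image embeddings, and the decoder/inpaint pipeline consumes those embeddings plus the
# image and mask — no text encoder runs in the second stage. Condensed from the test:
#
#   image_emb, zero_image_emb = pipe_prior("a hat", generator=generator).to_tuple()
#   result = pipeline(
#       image=init_image, mask_image=mask, image_embeds=image_emb,
#       negative_image_embeds=zero_image_emb, height=768, width=768, output_type="np",
#   ).images[0]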
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowercase :
_lowerCamelCase = PegasusConfig
_lowerCamelCase = {}
_lowerCamelCase = '''gelu'''
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=40 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=0 , ):
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = eos_token_id
__magic_name__ = pad_token_id
__magic_name__ = bos_token_id
def lowerCAmelCase__ ( self ):
__magic_name__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__magic_name__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__magic_name__ = tf.concat([input_ids, eos_tensor] , axis=1 )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__magic_name__ = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
__magic_name__ = TFPegasusModel(config=UpperCamelCase_ ).get_decoder()
__magic_name__ = inputs_dict["input_ids"]
__magic_name__ = input_ids[:1, :]
__magic_name__ = inputs_dict["attention_mask"][:1, :]
__magic_name__ = inputs_dict["head_mask"]
__magic_name__ = 1
# first forward pass
__magic_name__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
__magic_name__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__magic_name__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
__magic_name__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__magic_name__ = tf.concat([input_ids, next_tokens] , axis=-1 )
__magic_name__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__magic_name__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
__magic_name__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__magic_name__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__magic_name__ = output_from_no_past[:, -3:, random_slice_idx]
__magic_name__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
def lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Tuple:
if attention_mask is None:
__magic_name__ = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__magic_name__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__magic_name__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__magic_name__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__magic_name__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowercase ( _A , _A , unittest.TestCase ):
_lowerCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
_lowerCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
def lowerCAmelCase__ ( self ):
__magic_name__ = TFPegasusModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
__magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowercase ( unittest.TestCase ):
_lowerCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_lowerCamelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
_lowerCamelCase = '''google/pegasus-xsum'''
@cached_property
def lowerCAmelCase__ ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase__ ( self ):
__magic_name__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
__magic_name__ = self.translate_src_text(**UpperCamelCase_ )
assert self.expected_text == generated_words
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
__magic_name__ = self.tokenizer(self.src_text , **UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''tf''' )
__magic_name__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase_ , )
__magic_name__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )
return generated_words
@slow
def lowerCAmelCase__ ( self ):
self._assert_generated_batch_equal_expected()
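# Standalone summarization sketch mirroring translate_src_text above (the checkpoint is a
# sizeable download, so this is gated behind __main__):
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(
        ["PG&E scheduled the blackouts in response to forecasts for high winds."],
        padding=True,
        return_tensors="tf",
    )
    summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))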
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
_lowerCamelCase = FunnelTokenizer
_lowerCamelCase = FunnelTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 190 | 0 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    # Sieve of Eratosthenes: mark composite odd indices, then collect primes.
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    # Project Euler 50: find the prime below the ceiling that can be written
    # as the sum of the most consecutive primes.
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
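# Quick sanity check of the sieve on a small input (the listed values are the
# primes below 20, so this line is a factual example, not an assumption):
#   prime_sieve(20) -> [2, 3, 5, 7, 11, 13, 17, 19]
# solution() then scans windows of consecutive primes below the default ceiling.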
if __name__ == "__main__":
print(f'''{solution() = }''') | 45 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    # Lomuto-style partition that uses the leftmost element as the pivot.
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
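# Example usage: `right` is an exclusive upper bound, so the whole list is
# sorted in place with
#   data = [3, 1, 2]
#   quick_sort_random(data, 0, len(data))  # data becomes [1, 2, 3]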
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main() | 45 | 1 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 554 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    # Placeholder object that raises a helpful error when `note_seq` is missing.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 554 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
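# A minimal initialization sketch; the shapes below are illustrative assumptions
# (NHWC feature map, any time-embedding width works since nn.Dense infers it):
#
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   params = block.init(
#       jax.random.PRNGKey(0),
#       jnp.zeros((1, 8, 8, 32)),  # hidden states
#       jnp.zeros((1, 128)),       # time embedding
#   )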
| 62 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 421 | 0 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    # Convert a torch image batch in [-1, 1] to a list of PIL images.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    # Convert a numpy image batch in [0, 1] to a list of PIL images.
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
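# Illustrative example: a float array of shape (2, 64, 64, 3) with values in
# [0, 1] becomes a list of two 64x64 RGB PIL images:
#   numpy_to_pil(np.random.rand(2, 64, 64, 3))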
| 85 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
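# Illustrative example: a sparse-attention configuration built from the
# parameters defined above (the chosen values are just defaults spelled out):
#   config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)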
| 85 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ maps the CONSOLE_CURSOR_INFO struct layout for ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide_cursor_context():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
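# Usage sketch; note that `hide_cursor_context` is an assumed name for the
# context manager above (the original name was lost):
#   with hide_cursor_context():
#       print("the terminal cursor stays hidden while this block runs")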
| 571 |
# Number of symbols in the hashing alphabet (full byte range)
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
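        # The update below is the rolling-hash step: subtract the leading
        # character's contribution (its ord times alphabet_size**(p_len - 1),
        # precomputed as modulus_power), shift the remaining window by one
        # base multiplication, and add the incoming character, all mod `modulus`.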
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
test_rabin_karp()
| 590 | 0 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
        cluster = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 709 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
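# A minimal usage sketch; the tiny datasets built here are hypothetical
# placeholders, not fixtures from this module:
#
#   d1 = Dataset.from_dict({"x": [0, 1]})
#   d2 = Dataset.from_dict({"x": [10, 11]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#
# Map-style inputs dispatch to _interleave_map_style_datasets, iterable inputs
# to _interleave_iterable_datasets, sampling from each source until the chosen
# stopping strategy is reached.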
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis) | 406 | 0 |
| 406 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
lowerCamelCase_ = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 558 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
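# Illustrative example: a BiT-50-style backbone config that exposes the last
# two stages as backbone outputs (the stage names come from `stage_names` above):
#   config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage3", "stage4"])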
| 558 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")

    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 2_1, 3_2, 3_2)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}' )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1_0_0_0)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653])
else:
raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}' )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowerCAmelCase : int =parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 197 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any =logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] ={
"""facebook/timesformer""": """https://huggingface.co/facebook/timesformer/resolve/main/config.json""",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
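# Illustrative example: a configuration for 16-frame video clips using the
# default divided space-time attention:
#   config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")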
| 197 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
def SCREAMING_SNAKE_CASE_( self ) -> int:
with self.assertRaisesRegex(
lowercase , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCamelCase_ = FlaxAutoModel.from_pretrained("bert-base" )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
with self.assertRaisesRegex(
lowercase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCamelCase_ = FlaxAutoModel.from_pretrained(lowercase , revision="aaaaaa" )
def SCREAMING_SNAKE_CASE_( self ) -> int:
with self.assertRaisesRegex(
lowercase , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
lowerCamelCase_ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def SCREAMING_SNAKE_CASE_( self ) -> int:
with self.assertRaisesRegex(lowercase , "Use `from_pt=True` to load this model" ):
lowerCamelCase_ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
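
# Hedged usage sketch (added, not part of the test suite): the jit pattern the
# tests above exercise, as a standalone helper. Assumes jax/flax are installed
# and the Hub checkpoint is reachable.
def _jitted_bert_features(text: str):
    import jax
    from transformers import AutoTokenizer, FlaxBertModel

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    model = FlaxBertModel.from_pretrained("bert-base-cased")
    inputs = tokenizer(text, return_tensors="jax")

    @jax.jit
    def forward(**kwargs):
        return model(**kwargs)

    # block_until_ready() forces JAX's asynchronous dispatch to finish
    return forward(**inputs).last_hidden_state.block_until_ready()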
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=99 , lowercase=13 , lowercase=7 , lowercase=9 , lowercase=True , lowercase=True , lowercase=False , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase=8 , lowercase=0.1 , lowercase=0.0_0_2 , lowercase=1 , lowercase=0 , lowercase=0 , lowercase=None , lowercase=None , ) -> Tuple:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = encoder_seq_length
lowerCamelCase_ = decoder_seq_length
# For common tests
lowerCamelCase_ = self.decoder_seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = d_ff
lowerCamelCase_ = relative_attention_num_buckets
lowerCamelCase_ = dropout_rate
lowerCamelCase_ = initializer_factor
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = decoder_start_token_id
lowerCamelCase_ = None
lowerCamelCase_ = decoder_layers
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return TaConfig.from_pretrained("google/umt5-base" )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ) -> str:
if attention_mask is None:
lowerCamelCase_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowercase )
if decoder_head_mask is None:
lowerCamelCase_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowercase )
if cross_attn_head_mask is None:
lowerCamelCase_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowercase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase_ = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase_ = self.get_config()
lowerCamelCase_ = config.num_attention_heads
lowerCamelCase_ = self.prepare_inputs_dict(lowercase , lowercase , lowercase )
return config, input_dict
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ , lowerCamelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]:
lowerCamelCase_ = UMTaModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(
input_ids=lowercase , decoder_input_ids=lowercase , attention_mask=lowercase , decoder_attention_mask=lowercase , )
lowerCamelCase_ = model(input_ids=lowercase , decoder_input_ids=lowercase )
lowerCamelCase_ = result.last_hidden_state
lowerCamelCase_ = result.past_key_values
lowerCamelCase_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowercase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[int]:
lowerCamelCase_ = UMTaModel(config=lowercase ).get_decoder().to(lowercase ).eval()
# first forward pass
lowerCamelCase_ = model(lowercase , use_cache=lowercase )
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase , use_cache=lowercase )
self.parent.assertTrue(len(lowercase ) == len(lowercase ) )
self.parent.assertTrue(len(lowercase ) == len(lowercase ) + 1 )
lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = model(lowercase )["last_hidden_state"]
lowerCamelCase_ = model(lowercase , past_key_values=lowercase )["last_hidden_state"]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = UMTaModel(config=lowercase ).to(lowercase ).half().eval()
lowerCamelCase_ = model(**lowercase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowercase ).any().item() )
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowerCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowerCAmelCase__ = [0.8, 0.9]
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCamelCase_ = UMTaModel(config_and_inputs[0] ).to(lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowercase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=lowercase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowercase )
    def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )
            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowercase ).to(lowercase )
lowerCamelCase_ = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowercase , legacy=lowercase )
lowerCamelCase_ = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
lowerCamelCase_ = tokenizer(lowercase , return_tensors="pt" , padding=lowercase ).input_ids
# fmt: off
        lowerCamelCase_ = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
# fmt: on
torch.testing.assert_allclose(lowercase , lowercase )
lowerCamelCase_ = model.generate(input_ids.to(lowercase ) )
lowerCamelCase_ = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
lowerCamelCase_ = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , lowercase )
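
# Hedged usage sketch (added, not part of the test suite): minimal generation with
# the checkpoint the integration test targets. Assumes the standard `transformers`
# class name UMT5ForConditionalGeneration (rendered `UMTa...` in the imports above)
# and network access to the Hugging Face Hub.
def _umt5_generate(prompt: str) -> str:
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration

    tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
    model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, max_new_tokens=20)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)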
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ):
"""simple docstring"""
a__ : List[str] = parent
a__ : List[str] = 100
a__ : Union[str, Any] = batch_size
a__ : List[Any] = image_size
a__ : int = patch_size
a__ : Tuple = num_channels
a__ : Tuple = is_training
a__ : List[Any] = use_labels
a__ : List[str] = hidden_size
a__ : Any = num_hidden_layers
a__ : str = num_attention_heads
a__ : Optional[int] = intermediate_size
a__ : int = hidden_act
a__ : Union[str, Any] = hidden_dropout_prob
a__ : Tuple = attention_probs_dropout_prob
a__ : List[Any] = type_sequence_label_size
a__ : List[Any] = initializer_range
a__ : Any = scope
a__ : Any = out_indices
a__ : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Any = (image_size // patch_size) ** 2
a__ : Any = num_patches + 1
def _A ( self ):
"""simple docstring"""
a__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : List[Any] = None
a__ : Dict = None
if self.use_labels:
a__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _A ( self ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Dict = BeitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a__ : str = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Any = BeitForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a__ : str = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Union[str, Any] = self.type_sequence_label_size
a__ : Tuple = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a__ : Optional[int] = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a__ : List[Any] = 1
a__ : Tuple = BeitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ : List[Any] = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Any = self.num_labels
a__ : List[Any] = BeitForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a__ : Dict = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
a__ : str = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _A ( self ):
"""simple docstring"""
a__ : List[str] = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ : Dict = config_and_inputs
a__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A :List[str] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
A :Any = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A :Tuple = False
A :Union[str, Any] = False
A :List[str] = False
def _A ( self ):
"""simple docstring"""
a__ : Union[str, Any] = BeitModelTester(self )
a__ : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def _A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _A ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _A ( self ):
"""simple docstring"""
pass
def _A ( self ):
"""simple docstring"""
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def _A ( self ):
"""simple docstring"""
a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[int] = model_class(__UpperCAmelCase )
a__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Union[str, Any] = [*signature.parameters.keys()]
a__ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
a__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Tuple = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
a__ : List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
a__ : List[str] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
a__ : Optional[int] = model(**__UpperCAmelCase ).loss
loss.backward()
def _A ( self ):
"""simple docstring"""
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ : Tuple = False
a__ : Tuple = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
a__ : Union[str, Any] = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
a__ : Optional[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
a__ : int = model(**__UpperCAmelCase ).loss
loss.backward()
def _A ( self ):
"""simple docstring"""
a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Optional[int] = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
a__ : Dict = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _A ( self ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Dict = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _A ( self ):
"""simple docstring"""
a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__UpperCAmelCase )
a__ : Tuple = self.default_image_processor
a__ : Optional[Any] = prepare_img()
a__ : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
a__ : Optional[int] = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
a__ : Any = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
a__ : List[str] = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def _A ( self ):
"""simple docstring"""
a__ : Union[str, Any] = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__UpperCAmelCase )
a__ : str = self.default_image_processor
a__ : Optional[Any] = prepare_img()
a__ : List[Any] = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
a__ : Union[str, Any] = model(**__UpperCAmelCase )
a__ : List[Any] = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
a__ : Optional[int] = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
a__ : List[str] = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def _A ( self ):
"""simple docstring"""
a__ : Optional[int] = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
__UpperCAmelCase )
a__ : Tuple = self.default_image_processor
a__ : Union[str, Any] = prepare_img()
a__ : int = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
a__ : Optional[int] = model(**__UpperCAmelCase )
a__ : Optional[Any] = outputs.logits
# verify the logits
a__ : List[Any] = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
a__ : str = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
a__ : Any = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def _A ( self ):
"""simple docstring"""
a__ : Any = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : Dict = model.to(__UpperCAmelCase )
a__ : List[Any] = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : List[str] = Image.open(ds[0]["file"] )
a__ : Union[str, Any] = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
a__ : List[Any] = model(**__UpperCAmelCase )
a__ : int = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
a__ : Union[str, Any] = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
a__ : Optional[int] = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
a__ : List[Any] = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def _A ( self ):
"""simple docstring"""
a__ : int = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : Tuple = model.to(__UpperCAmelCase )
a__ : List[Any] = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : str = Image.open(ds[0]["file"] )
a__ : List[str] = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**__UpperCAmelCase )
a__ : Any = outputs.logits.detach().cpu()
a__ : str = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
a__ : Optional[Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
a__ : Optional[int] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
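
# Hedged usage sketch (added, not part of the test suite): end-to-end semantic
# segmentation with the checkpoint exercised above. Assumes network access; the
# image path is a placeholder.
def _beit_segment(image_path: str):
    import torch
    from PIL import Image
    from transformers import BeitForSemanticSegmentation, BeitImageProcessor

    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
    model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
    image = Image.open(image_path)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # one (height, width) label map, resized back to the input image's size
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]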
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters


def main() -> None:
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, you better save it.]")


if __name__ == "__main__":
    main()
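
# Hedged example (added): non-interactive use of the helpers above.
def _password_demo() -> None:
    pw = password_generator(12)
    print(pw, "strong" if is_strong_password(pw) else "weak")
    # force the characters "@1Ab" into a 12-character password
    print(alternative_password_generator("@1Ab", 12))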
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(_FORMAT_TYPES.keys())}, but got '{format_type}'"
        )
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
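
# Hedged example (added): the same structure supports range-minimum queries;
# building is O(n), while update and query_range are O(log n).
def _range_min_demo() -> int:
    rmq = SegmentTree([9, 4, 7, 1], min)
    rmq.update(1, 0)  # collection becomes [9, 0, 7, 1]
    return rmq.query_range(0, 3)  # 0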
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A__ :
'''simple docstring'''
def __init__( self : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict=13 , _SCREAMING_SNAKE_CASE : Dict=30 , _SCREAMING_SNAKE_CASE : Union[str, Any]=2 , _SCREAMING_SNAKE_CASE : List[Any]=3 , _SCREAMING_SNAKE_CASE : Tuple=True , _SCREAMING_SNAKE_CASE : str=True , _SCREAMING_SNAKE_CASE : str=32 , _SCREAMING_SNAKE_CASE : Any=2 , _SCREAMING_SNAKE_CASE : int=4 , _SCREAMING_SNAKE_CASE : Tuple=37 , _SCREAMING_SNAKE_CASE : Optional[int]="gelu" , _SCREAMING_SNAKE_CASE : int=0.1 , _SCREAMING_SNAKE_CASE : str=0.1 , _SCREAMING_SNAKE_CASE : str=10 , _SCREAMING_SNAKE_CASE : str=0.0_2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=3 , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : str=2 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = scope
UpperCamelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 2
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
UpperCamelCase = TFDeiTModel(config=_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
UpperCamelCase = TFDeiTForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = TFDeiTForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = TFDeiTForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = TFDeiTForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case__ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def _SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFDeiTModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str]=False ):
"""simple docstring"""
UpperCamelCase = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFDeiTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='tf' )
# forward pass
UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
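
# Hedged usage sketch (added, not part of the test suite): single-image inference
# with the distilled DeiT checkpoint used above. Assumes network access; the image
# path is a placeholder.
def _tf_deit_predict(image_path: str) -> int:
    import tensorflow as tf
    from PIL import Image
    from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    inputs = processor(images=Image.open(image_path), return_tensors="tf")
    logits = model(**inputs).logits
    return int(tf.argmax(logits, axis=-1)[0])  # ImageNet class index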
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
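
# Hedged usage sketch (added): instantiating the config directly. The 2048-dim
# visual embedding matches the Detectron-style region features used by the VQA
# checkpoints.
def _visual_bert_config_demo():
    config = VisualBertConfig(visual_embedding_dim=2048)
    return config.visual_embedding_dim  # 2048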
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=True , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :str = parent
__magic_name__ :Dict = batch_size
__magic_name__ :List[Any] = seq_length
__magic_name__ :int = is_training
__magic_name__ :Optional[int] = use_input_mask
__magic_name__ :Tuple = use_token_type_ids
__magic_name__ :Optional[Any] = use_labels
__magic_name__ :Tuple = vocab_size
__magic_name__ :List[str] = hidden_size
__magic_name__ :str = num_hidden_layers
__magic_name__ :int = num_attention_heads
__magic_name__ :Tuple = intermediate_multiple_size
__magic_name__ :int = hidden_act
__magic_name__ :Optional[int] = hidden_dropout
__magic_name__ :Optional[Any] = attention_dropout
__magic_name__ :Optional[Any] = weight_tying
__magic_name__ :str = max_position_embeddings
__magic_name__ :Any = type_vocab_size
__magic_name__ :Optional[int] = type_sequence_label_size
__magic_name__ :List[str] = initializer_range
__magic_name__ :Dict = num_labels
__magic_name__ :Optional[Any] = num_choices
__magic_name__ :List[Any] = scope
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :Optional[Any] = None
if self.use_input_mask:
__magic_name__ :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :List[str] = None
if self.use_labels:
__magic_name__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def A ( self ):
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :List[Any] = self.prepare_config_and_inputs()
__magic_name__ :Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = GPTNeoXJapaneseModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = True
__magic_name__ :Union[str, Any] = GPTNeoXJapaneseModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = GPTNeoXJapaneseForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = True
__magic_name__ :Dict = GPTNeoXJapaneseForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
__magic_name__ :List[str] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
__magic_name__ :Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
__magic_name__ :str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__magic_name__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
__magic_name__ :List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
__magic_name__ :Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__magic_name__ :Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
__magic_name__ :int = output_from_no_past['''hidden_states'''][0]
__magic_name__ :Dict = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0]
# select random slice
__magic_name__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__magic_name__ :Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__magic_name__ :Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :Optional[int] = config_and_inputs
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
a__ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
a__ = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :int = GPTNeoXJapaneseModelTester(self )
__magic_name__ :Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
# This regression test was failing with PyTorch < 1.3
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :int = self.model_tester.prepare_config_and_inputs_for_decoder()
__magic_name__ :List[str] = None
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = '''abeja/gpt-neox-japanese-2.7b'''
__magic_name__ :Union[str, Any] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
__magic_name__ :Union[str, Any] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
__magic_name__ :Optional[int] = GPTNeoXJapaneseTokenizer.from_pretrained(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__lowerCAmelCase )
__magic_name__ :Optional[Any] = []
for prompt in prompts:
__magic_name__ :Optional[int] = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ).input_ids
__magic_name__ :List[Any] = model.generate(__lowerCAmelCase , max_length=5_0 )
__magic_name__ :Dict = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
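# A standalone usage sketch of the generation flow exercised by the slow test
# above, written with the real (un-mangled) transformers identifiers; the
# checkpoint is the one named in the test and is downloaded on first use.
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
input_ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
print(tokenizer.batch_decode(model.generate(input_ids, max_length=50), skip_special_tokens=True))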
| 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def a__ ( snake_case__ ) -> list[list[float]]:
lowerCamelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(snake_case__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCamelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
lowerCamelCase = [[0.0, 0.0], [0.0, 0.0]]
lowerCamelCase , lowerCamelCase = matrix[1][1], matrix[0][0]
lowerCamelCase , lowerCamelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
            [(float(d(n) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(snake_case__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCamelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
lowerCamelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCamelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCamelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCamelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCamelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCamelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCamelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCamelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCamelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCamelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
        lowerCamelCase = array(cofactor_matrix )
for i in range(3 ):
for j in range(3 ):
lowerCamelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        lowerCamelCase = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
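# A quick independent cross-check of the 2x2 branch above using NumPy
# (a sketch; the sample matrix is an illustrative assumption):
import numpy as np

sample = np.array([[2.0, 5.0], [1.0, 3.0]])  # determinant = 2*3 - 5*1 = 1
print(np.linalg.inv(sample))  # [[ 3. -5.] [-1.  2.]]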
| 543 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : np.ndarray ,lowerCAmelCase_ : Union[int, Iterable[int]] ,lowerCAmelCase_ : bool ,lowerCAmelCase_ : int ) -> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(lowerCAmelCase_ : Union[str, Any] ,lowerCAmelCase_ : int ,lowerCAmelCase_ : Any=0 ,lowerCAmelCase_ : Optional[Any]=None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE_ : int =math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE_ : Optional[int] =math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE_ : Union[str, Any] =(output_size, output_size) if isinstance(lowerCAmelCase_ ,lowerCAmelCase_ ) else output_size
SCREAMING_SNAKE_CASE_ : str =get_image_size(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Dict =output_size
# determine new height and width
SCREAMING_SNAKE_CASE_ : Optional[Any] =output_height / input_height
SCREAMING_SNAKE_CASE_ : str =output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE_ : List[Any] =scale_width
else:
# fit height
SCREAMING_SNAKE_CASE_ : Optional[int] =scale_height
SCREAMING_SNAKE_CASE_ : Any =constraint_to_multiple_of(scale_height * input_height ,multiple=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] =constraint_to_multiple_of(scale_width * input_width ,multiple=lowerCAmelCase_ )
return (new_height, new_width)
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = False , __UpperCAmelCase = 1 , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =size if size is not None else {'height': 384, 'width': 384}
SCREAMING_SNAKE_CASE_ : List[Any] =get_size_dict(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =do_resize
SCREAMING_SNAKE_CASE_ : List[Any] =size
SCREAMING_SNAKE_CASE_ : Union[str, Any] =keep_aspect_ratio
SCREAMING_SNAKE_CASE_ : Optional[Any] =ensure_multiple_of
SCREAMING_SNAKE_CASE_ : List[str] =resample
SCREAMING_SNAKE_CASE_ : List[str] =do_rescale
SCREAMING_SNAKE_CASE_ : Union[str, Any] =rescale_factor
SCREAMING_SNAKE_CASE_ : Union[str, Any] =do_normalize
SCREAMING_SNAKE_CASE_ : List[Any] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ : List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = 1 , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : List[str] =get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE_ : Optional[int] =get_resize_output_image_size(
__UpperCAmelCase , output_size=(size['height'], size['width']) , keep_aspect_ratio=__UpperCAmelCase , multiple=__UpperCAmelCase , )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Dict =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Any =size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : str =get_size_dict(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE_ : str =ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE_ : Optional[Any] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Union[str, Any] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Union[str, Any] =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : str =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : Optional[Any] =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : int =make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : List[str] =[to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Optional[int] =[self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : int =[self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : List[Any] =[self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[int] =[to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] ={'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : int =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =target_sizes.numpy()
SCREAMING_SNAKE_CASE_ : Optional[int] =[]
for idx in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Any =logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE_ : Dict =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
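# Standalone re-derivation of the keep_aspect_ratio + ensure_multiple_of resize
# rule implemented above (the 480x640 input size is an illustrative assumption):
input_h, input_w, target, multiple = 480, 640, 384, 32
scale_h, scale_w = target / input_h, target / input_w
# "scale as little as possible": keep whichever factor deviates less from 1
scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
print(round(scale * input_h / multiple) * multiple, round(scale * input_w / multiple) * multiple)  # 384 512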
| 706 |
__SCREAMING_SNAKE_CASE = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__SCREAMING_SNAKE_CASE = [{'type': 'code', 'content': INSTALL_CONTENT}]
__SCREAMING_SNAKE_CASE = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 153 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__lowercase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__lowercase = {'''facebook/blenderbot-3B''': 128}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[str] = VOCAB_FILES_NAMES
a__ : int = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
a__ : List[Any] = BlenderbotTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , __lowercase=True , **__lowercase , ) -> Optional[int]:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase , **__lowercase , )
__UpperCamelCase :Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
__UpperCamelCase :List[Any] = getattr(__lowercase , pre_tok_state.pop('''type'''))
__UpperCamelCase :Optional[Any] = add_prefix_space
__UpperCamelCase :Any = pre_tok_class(**__lowercase)
__UpperCamelCase :List[Any] = add_prefix_space
__UpperCamelCase :str = '''post_processor'''
__UpperCamelCase :Union[str, Any] = getattr(self.backend_tokenizer , __lowercase , __lowercase)
if tokenizer_component_instance:
__UpperCamelCase :Tuple = json.loads(tokenizer_component_instance.__getstate__())
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__UpperCamelCase :Union[str, Any] = tuple(state['''sep'''])
if "cls" in state:
__UpperCamelCase :int = tuple(state['''cls'''])
__UpperCamelCase :Optional[int] = False
if state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
__UpperCamelCase :List[str] = add_prefix_space
__UpperCamelCase :Union[str, Any] = True
if state.get('''trim_offsets''' , __lowercase) != trim_offsets:
__UpperCamelCase :Dict = trim_offsets
__UpperCamelCase :Tuple = True
if changes_to_apply:
__UpperCamelCase :List[Any] = getattr(__lowercase , state.pop('''type'''))
__UpperCamelCase :List[Any] = component_class(**__lowercase)
setattr(self.backend_tokenizer , __lowercase , __lowercase)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCamelCase__ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCamelCase__ ( self , __lowercase) -> Optional[int]:
__UpperCamelCase :Dict = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else value
__UpperCamelCase :Optional[int] = value
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :int = kwargs.get('''is_split_into_words''' , __lowercase)
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :List[Any] = kwargs.get('''is_split_into_words''' , __lowercase)
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
__UpperCamelCase :Union[str, Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
return tuple(__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :Tuple = [self.sep_token_id]
__UpperCamelCase :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Dict:
return token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self , __lowercase) -> List[int]:
__UpperCamelCase :Any = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(''' ''' + text)
else:
# Generated responses should contain them already.
inputs.append(__lowercase)
__UpperCamelCase :Tuple = ''' '''.join(__lowercase)
__UpperCamelCase :Any = self.encode(__lowercase)
if len(__lowercase) > self.model_max_length:
__UpperCamelCase :List[str] = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
return input_ids
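# Standalone sketch of the conversation flattening performed above (the turn
# texts are hypothetical): user turns get a leading space, generated turns are
# kept as-is, and everything is joined with single spaces before encoding.
turns = [(True, "Hello."), (False, "Hi, how are you?"), (True, "Fine, thanks.")]
parts = [" " + text if is_user else text for is_user, text in turns]
print(" ".join(parts))  # ' Hello. Hi, how are you?  Fine, thanks.'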
| 167 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : Any = DDIMPipeline
a__ : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
a__ : Any = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
a__ : Optional[int] = False
def UpperCamelCase__ ( self) -> Optional[Any]:
torch.manual_seed(0)
__UpperCamelCase :Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
__UpperCamelCase :Dict = DDIMScheduler()
__UpperCamelCase :int = {'''unet''': unet, '''scheduler''': scheduler}
return components
def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Tuple:
if str(__lowercase).startswith('''mps'''):
__UpperCamelCase :Optional[int] = torch.manual_seed(__lowercase)
else:
__UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(__lowercase)
__UpperCamelCase :str = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :int = '''cpu'''
__UpperCamelCase :Any = self.get_dummy_components()
__UpperCamelCase :Any = self.pipeline_class(**__lowercase)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Dict = self.get_dummy_inputs(__lowercase)
__UpperCamelCase :Union[str, Any] = pipe(**__lowercase).images
__UpperCamelCase :Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3))
__UpperCamelCase :List[str] = np.array(
[1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4])
__UpperCamelCase :int = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(__lowercase , 1E-3)
def UpperCamelCase__ ( self) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> Any:
super().test_save_load_local(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> Optional[Any]:
super().test_save_load_optional_components(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :int = '''google/ddpm-cifar10-32'''
__UpperCamelCase :str = UNetaDModel.from_pretrained(__lowercase)
__UpperCamelCase :int = DDIMScheduler()
__UpperCamelCase :Optional[Any] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase)
ddim.to(__lowercase)
ddim.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Dict = torch.manual_seed(0)
__UpperCamelCase :Tuple = ddim(generator=__lowercase , eta=0.0 , output_type='''numpy''').images
__UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase :List[str] = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :int = '''google/ddpm-ema-bedroom-256'''
__UpperCamelCase :Optional[Any] = UNetaDModel.from_pretrained(__lowercase)
__UpperCamelCase :Union[str, Any] = DDIMScheduler.from_pretrained(__lowercase)
__UpperCamelCase :Optional[int] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase)
ddpm.to(__lowercase)
ddpm.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Dict = torch.manual_seed(0)
__UpperCamelCase :Optional[int] = ddpm(generator=__lowercase , output_type='''numpy''').images
__UpperCamelCase :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCamelCase :Any = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
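# Minimal standalone version of the slow CIFAR-10 test above, using the real
# (un-mangled) diffusers identifiers; the checkpoint is the one referenced in
# the test and is downloaded on first use.
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)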
| 167 | 1 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
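# Note: this module is a backward-compatibility shim. Every name above is
# simply re-exported from `transformers.utils`, so legacy imports such as
# `from transformers.file_utils import cached_property` keep working even
# though the implementations have moved.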
| 562 |
"""simple docstring"""
from timeit import timeit
UpperCamelCase = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
A__ = 0
A__ = len(UpperCAmelCase_ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
A__ = len(UpperCAmelCase_ ) // 2
A__ = len(UpperCAmelCase_ )
    # We only need to traverse half of the string, since the i-th
    # character from the end is reachable from index n - i - 1.
    # e.g. [0,1,2,3,4,5] => index 4 pairs with index 1 (i == n - i - 1),
    # where n is the length of the string.
return all(s[i] == s[n - i - 1] for i in range(UpperCAmelCase_ ) )
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
if len(UpperCAmelCase_ ) <= 2:
return True
if s[0] == s[len(UpperCAmelCase_ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> bool:
"""simple docstring"""
return s == s[::-1]
def _lowerCamelCase ( UpperCAmelCase_ : str ) -> None:
"""simple docstring"""
A__ = F"""all({name}(key) is value for key, value in test_data.items())"""
A__ = F"""from __main__ import test_data, {name}"""
A__ = 500000
A__ = timeit(stmt=UpperCAmelCase_, setup=UpperCAmelCase_, number=UpperCAmelCase_ )
print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'{key:21} {value}')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
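# Self-contained sketch of the two-pointer variant benchmarked above as
# "is_palindrome_traversal" (the original name survives only in the benchmark
# strings, since every def in this snippet was renamed to `_lowerCamelCase`):
def is_palindrome_traversal(s: str) -> bool:
    start, end = 0, len(s) - 1
    while start < end:
        if s[start] != s[end]:
            return False
        start, end = start + 1, end - 1
    return True

print(is_palindrome_traversal("MALAYALAM"), is_palindrome_traversal("String"))  # True False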
| 562 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def UpperCamelCase__ ( __magic_name__ : Dict=None ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = argparse.ArgumentParser(add_help=UpperCamelCase__ , allow_abbrev=UpperCamelCase__ )
# The main config parser
snake_case__ : int = config_command_parser(UpperCamelCase__ )
# The subparser to add commands to
snake_case__ : Optional[Any] = config_parser.add_subparsers(title="""subcommands""" , dest="""subcommand""" )
# Then add other parsers with the parent parser
default_command_parser(UpperCamelCase__ , parents=[parent_parser] )
update_command_parser(UpperCamelCase__ , parents=[parent_parser] )
return config_parser
def UpperCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
snake_case__ : int = get_config_parser()
snake_case__ : Any = config_parser.parse_args()
if not hasattr(UpperCamelCase__ , """func""" ):
config_parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase__ )
if __name__ == "__main__":
main()
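# Usage note (shell, shown as comments): the parser assembled above backs the
# `accelerate config` entry point, e.g.
#   accelerate config           # interactive configuration questionnaire
#   accelerate config default   # write a default config file non-interactively
#   accelerate config update    # update an existing config file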
| 38 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
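# Note on the pattern above: in the original layout the module object is
# replaced by a `_LazyModule` proxy, so the heavy torch-backed submodules are
# only imported on first attribute access, while the TYPE_CHECKING branch
# keeps static analyzers aware of the real symbols.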
| 503 | 0 |
import requests
__UpperCamelCase : Dict = 'YOUR API KEY'
def _UpperCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : str = giphy_api_key ):
"""simple docstring"""
__lowerCamelCase : Any = """+""".join(query.split() )
__lowerCamelCase : List[Any] = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
__lowerCamelCase : List[str] = requests.get(UpperCAmelCase ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
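# Standalone sketch of the same search request (endpoint and parameters as
# used above; the API key is a placeholder assumption and must be replaced):
import requests

resp = requests.get(
    "https://api.giphy.com/v1/gifs/search",
    params={"q": "space ship", "api_key": "YOUR API KEY"},
)
print([gif["url"] for gif in resp.json().get("data", [])])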
| 458 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__UpperCamelCase : Dict = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _UpperCAmelCase ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any ):
"""simple docstring"""
for attribute in key.split(""".""" ):
__lowerCamelCase : Any = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
__lowerCamelCase : List[Any] = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
__lowerCamelCase : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowerCamelCase : List[Any] = value
elif weight_type == "weight_g":
__lowerCamelCase : Tuple = value
elif weight_type == "weight_v":
__lowerCamelCase : List[Any] = value
elif weight_type == "bias":
__lowerCamelCase : Optional[int] = value
else:
__lowerCamelCase : Dict = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _UpperCAmelCase ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase : Tuple = []
__lowerCamelCase : Union[str, Any] = fairseq_model.state_dict()
__lowerCamelCase : Optional[Any] = hf_model.feature_extractor
__lowerCamelCase : List[str] = hf_model.adapter
for name, value in fairseq_dict.items():
__lowerCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
__lowerCamelCase : Any = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowerCamelCase : Union[str, Any] = True
if "*" in mapped_key:
__lowerCamelCase : int = name.split(UpperCAmelCase )[0].split(""".""" )[-2]
__lowerCamelCase : Dict = mapped_key.replace("""*""" , UpperCAmelCase )
if "weight_g" in name:
__lowerCamelCase : int = """weight_g"""
elif "weight_v" in name:
__lowerCamelCase : List[str] = """weight_v"""
elif "bias" in name:
__lowerCamelCase : str = """bias"""
elif "weight" in name:
__lowerCamelCase : List[str] = """weight"""
else:
__lowerCamelCase : int = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _UpperCAmelCase ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase : List[str] = full_name.split("""conv_layers.""" )[-1]
__lowerCamelCase : Tuple = name.split(""".""" )
__lowerCamelCase : Tuple = int(items[0] )
__lowerCamelCase : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowerCamelCase : Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowerCamelCase : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowerCamelCase : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowerCamelCase : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase )
def _UpperCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = full_name.split("""adaptor.""" )[-1]
__lowerCamelCase : Any = name.split(""".""" )
if items[1].isdigit():
__lowerCamelCase : Dict = int(items[1] )
else:
__lowerCamelCase : List[str] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
__lowerCamelCase : str = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
__lowerCamelCase : str = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
__lowerCamelCase : Optional[int] = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
__lowerCamelCase : List[str] = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
__lowerCamelCase : int = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
__lowerCamelCase : Tuple = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase )
def _UpperCAmelCase ( UpperCAmelCase : List[Any] ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase : Tuple = emb.weight.shape
__lowerCamelCase : Any = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : List[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def _UpperCAmelCase ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , ):
"""simple docstring"""
__lowerCamelCase : int = WavaVecaConfig.from_pretrained(
UpperCAmelCase , add_adapter=UpperCAmelCase , adapter_stride=UpperCAmelCase , adapter_kernel_size=UpperCAmelCase , use_auth_token=UpperCAmelCase , output_hidden_size=UpperCAmelCase , )
__lowerCamelCase : Optional[int] = MBartConfig.from_pretrained(UpperCAmelCase )
# load model
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
__lowerCamelCase : Union[str, Any] = model[0].eval()
# load feature extractor
__lowerCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase , use_auth_token=UpperCAmelCase )
# set weights for wav2vec2 encoder
__lowerCamelCase : Tuple = WavaVecaModel(UpperCAmelCase )
recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase )
# load decoder weights
__lowerCamelCase : Dict = MBartForCausalLM(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowerCamelCase : List[Any] = SpeechEncoderDecoderModel(encoder=UpperCAmelCase , decoder=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : List[str] = MBartaaTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
__lowerCamelCase : int = hf_wavavec.config.to_dict()
__lowerCamelCase : str = tokenizer.pad_token_id
__lowerCamelCase : Optional[Any] = tokenizer.bos_token_id
__lowerCamelCase : Dict = tokenizer.eos_token_id
__lowerCamelCase : Tuple = """mbart50"""
__lowerCamelCase : List[str] = """wav2vec2"""
__lowerCamelCase : List[str] = tokenizer.eos_token_id
__lowerCamelCase : Optional[int] = 250_004
__lowerCamelCase : Dict = tokenizer.eos_token_id
__lowerCamelCase : Optional[Any] = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase )
hf_wavavec.save_pretrained(UpperCAmelCase )
feature_extractor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250004, type=int, help='`decoder_start_token_id` of model config')
__UpperCamelCase : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
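# Standalone sketch of the embedding-to-linear weight-tying helper above
# (real torch API; the sizes are illustrative assumptions):
import torch
from torch import nn

emb = nn.Embedding(10, 4)
lin = nn.Linear(4, 10, bias=False)
lin.weight.data = emb.weight.data  # tie the LM head to the embedding matrix
logits = lin(emb(torch.tensor([3])))
assert torch.allclose(logits[0, 3], (emb.weight[3] ** 2).sum())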
| 458 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( __snake_case ) -> bool:
"""simple docstring"""
if len(__snake_case ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
_UpperCamelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
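# Quick worked example of the check above: the longest side must be strictly
# shorter than the sum of the others.
for sides in ([3, 4, 5], [1, 2, 10]):
    ordered = sorted(sides)
    print(sides, ordered[-1] < sum(ordered[:-1]))  # [3, 4, 5] True / [1, 2, 10] False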
| 19 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'detr'
lowercase__ = ['past_key_values']
lowercase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__a , __a):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__a)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> int:
'''simple docstring'''
return cls(backbone_config=__a , **__a)
def UpperCAmelCase ( self) -> Dict[str, any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = version.parse('1.11' )
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase ( self) -> float:
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 12
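# Minimal usage sketch of the config class above, via the real transformers
# identifier (DetrConfig) rather than the mangled class name in this snippet:
from transformers import DetrConfig

config = DetrConfig(num_queries=50, encoder_layers=2, decoder_layers=2)
print(config.hidden_size == config.d_model)  # True, thanks to attribute_map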
| 19 | 1 |
from collections.abc import Generator
from math import sin
def _lowercase ( SCREAMING_SNAKE_CASE_ : bytes ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) != 32:
raise ValueError("""Input must be of length 32""" )
UpperCamelCase = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if i < 0:
raise ValueError("""Input must be non-negative""" )
UpperCamelCase = format(SCREAMING_SNAKE_CASE_ , """08x""" )[-8:]
UpperCamelCase = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def _lowercase ( SCREAMING_SNAKE_CASE_ : bytes ):
"""simple docstring"""
UpperCamelCase = b""
for char in message:
bit_string += format(SCREAMING_SNAKE_CASE_ , """08b""" ).encode("""utf-8""" )
UpperCamelCase = format(len(SCREAMING_SNAKE_CASE_ ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
    while len(bit_string) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def not_aa(i: int) -> int:
    """Bitwise NOT of a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(SCREAMING_SNAKE_CASE_ , 2 )
def sum_aa(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-character hex MD5 digest of `message`."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67_45_23_01
    ba = 0xEF_CD_AB_89
    ca = 0x98_BA_DC_FE
    da = 0x10_32_54_76

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
# Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
return digest
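
# Quick sanity check (editor's addition): the expected value below is the
# well-known MD5 digest of the empty message, not a constant from this file.
#
#     assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"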
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """Validate the input and return it as a prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
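
# Worked example (editor's addition): 11 = 0b1011, so
#
#     main("11")   returns "0b1011"
#     main("-11")  returns "-0b1011"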
if __name__ == "__main__":
from doctest import testmod
testmod()
| 181 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 388 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 137 | 0 |
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """Return True if `number`'s square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
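
# Worked examples (editor's addition): 5**2 = 25 and 76**2 = 5776 both end in
# the original number, while 7**2 = 49 does not.
#
#     is_automorphic_number(76)  # True
#     is_automorphic_number(7)   # False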
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]) )
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Compute the edges of a minimum spanning tree with Prim's algorithm."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
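
# Editor's sketch of calling the function above on a small in-memory graph
# (vertex -> list of [neighbor, weight]); the triangle below is an invented
# example, not data from this file:
#
#     graph = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
#     prisms_algorithm(graph)  # [(0, 1), (1, 2)]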
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]

        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
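
# Editor's note on the layout produced by the methods above: a single sequence
# is encoded as [CLS] A [SEP] and a pair as [CLS] A [SEP] B [SEP], with
# token_type_ids of 0 for the first segment and 1 for the second.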
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 142 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
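
# Editor's sketch of invoking this pipeline through the transformers factory
# (the image path and question are invented examples):
#
#     from transformers import pipeline
#     vqa = pipeline("visual-question-answering")
#     vqa(image="cats.jpg", question="How many cats are there?")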
| 717 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
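
# Worked example (editor's addition): P(X = 2) for 4 trials with p = 0.75 is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375, which is what
# the __main__ block below prints.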
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.7_5))
| 594 | 0 |
'''simple docstring'''
# function name reconstructed by the editor; the obfuscated original gave no usable name
def get_set_bits_count(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
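
# Example (editor's addition): 11 = 0b1011 has three set bits, so
# get_set_bits_count(11) returns 3.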
if __name__ == "__main__":
import doctest
doctest.testmod()
| 525 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    pad_to_max_length: bool = field(
        default=True, metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False, metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
            train_dataset = load_dataset(
"xnli" ,model_args.language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
            train_dataset = load_dataset(
"xnli" ,model_args.train_language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
        label_list = train_dataset.features["label"].names
if training_args.do_eval:
        eval_dataset = load_dataset(
"xnli" ,model_args.language ,split="validation" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
        label_list = eval_dataset.features["label"].names
if training_args.do_predict:
        predict_dataset = load_dataset(
"xnli" ,model_args.language ,split="test" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
        label_list = predict_dataset.features["label"].names
# Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase : int = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
if training_args.do_train:
if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
# Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )
# Get the metric function
    metric = evaluate.load("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
        data_collator = default_data_collator
elif training_args.fpaa:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" ,a_ )
trainer.save_metrics("train" ,a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
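
# Editor's sketch of a typical invocation of this script (argument values are
# illustrative, not prescribed by this file):
#
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --per_device_train_batch_size 32 --output_dir /tmp/debug_xnli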
| 525 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# constant names reconstructed by the editor from similar accelerate examples
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
"""simple docstring"""
model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references, )
    eval_metric = metric.compute()
return eval_metric["accuracy"]
def training_function(config, args):
"""simple docstring"""
    accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
# Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
# We need to keep track of how many total steps we have iterated over
    overall_step = 0
# We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
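
# Editor's sketch of launching this training script (the flags mirror the
# argparse options above; the script name and output path are invented):
#
#   accelerate launch <this_script>.py --num_epochs 2 --output_dir /tmp/ckpts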
| 714 |
def hex_to_bin(hex_num: str) -> int:
    """Convert a (possibly signed) hexadecimal string to its binary digits as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
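
# Worked example (editor's addition): hex "AC" is 172 = 0b10101100, so
#
#     hex_to_bin("AC")   returns 10101100
#     hex_to_bin("-ac")  returns -10101100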
if __name__ == "__main__":
import doctest
doctest.testmod()
| 458 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
if "tiny" in model_name:
__UpperCAmelCase : List[str] = 96
elif "small" in model_name:
__UpperCAmelCase : Dict = 96
elif "base" in model_name:
__UpperCAmelCase : List[Any] = 128
elif "large" in model_name:
__UpperCAmelCase : Any = 192
elif "xlarge" in model_name:
__UpperCAmelCase : Tuple = 256
elif "huge" in model_name:
__UpperCAmelCase : int = 352
# set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
# fmt: off
    model_name_to_url = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
# rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)
# verify conversion
__UpperCAmelCase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__UpperCAmelCase : Union[str, Any] = BitImageProcessor(
do_resize=__lowerCamelCase , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=224 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , )
__UpperCAmelCase : Optional[Any] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
__UpperCAmelCase : List[Any] = processor(images=__lowerCamelCase , return_tensors="""pt""" )
    image_transforms = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
    original_pixel_values = image_transforms(image).unsqueeze(0)
# verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
print("""First values of logits:""" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__UpperCAmelCase : Union[str, Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
__UpperCAmelCase : Dict = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
__UpperCAmelCase : int = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
__UpperCAmelCase : int = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
__UpperCAmelCase : Optional[int] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
__UpperCAmelCase : Optional[Any] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
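
# Editor's sketch of a command line for this conversion script (the script
# filename and dump path are invented examples):
#
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path /tmp/focalnet-tiny --push_to_hub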
| 63 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowerCamelCase_ = kwargs.pop("raw_speech" )
else:
lowerCamelCase_ = kwargs.pop("audio" , lowercase )
lowerCamelCase_ = kwargs.pop("sampling_rate" , lowercase )
lowerCamelCase_ = kwargs.pop("text" , lowercase )
if len(lowercase ) > 0:
lowerCamelCase_ = args[0]
lowerCamelCase_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowerCamelCase_ = self.feature_extractor(lowercase , *lowercase , sampling_rate=lowercase , **lowercase )
if text is not None:
lowerCamelCase_ = self.tokenizer(lowercase , **lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 463 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
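# What the lazy indirection buys (a sketch): importing this module stays cheap,
# and the torch-backed classes above are only materialized on first attribute
# access.
#
#   from transformers.models.biogpt import BioGptConfig  # resolved lazily at runtime
#   config = BioGptConfig()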
| 102 |
def get_min_or_max(min_val=10, max_val=1_000, option=True):
    """Return min_val when option is True, otherwise max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid values: min_val must not be greater than max_val")
    return min_val if option else max_val


def get_avg(number_1, number_2):
    """Return the integer average of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower, higher, to_guess):
    """Find `to_guess` by binary search between `lower` and `higher`, printing every probe."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value of lower must not be greater than higher")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main():
    """Collect user input and start the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
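# Worked example of the binary search (integer averages):
#   guess_the_number(1, 100, 37)
#   started...
#   guess the number : 37
#   details : [50, 25, 37]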
| 102 | 1 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes the wrapped function return its wall-clock run time in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

    num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
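# Minimal sketch of how these helpers are driven (the Features spec and the
# temporary path are illustrative assumptions, not part of this module):
#
#   import tempfile
#   feats = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("int32")})
#   with tempfile.NamedTemporaryFile(suffix=".arrow", delete=False) as f:
#       dset = generate_example_dataset(f.name, feats, num_examples=10)
#   assert len(dset) == 10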
| 127 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf(self):
        pass
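# Outside the harness, the pipeline under test is driven in the obvious way.
# This is a sketch only: the checkpoint is one of those pinned above, and
# tesseract must be installed for the OCR step to run.
#
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)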
| 95 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
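# Hypothetical invocation (the script file name and output path are placeholders):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted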
| 28 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
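# Minimal end-to-end sketch of the FaissIndex API exercised above (assumes
# `faiss-cpu` is installed; the identity-matrix vectors are illustrative):
#
#   from datasets.search import FaissIndex
#   index = FaissIndex()
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, ids = index.search(np.ones(5, dtype=np.float32))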
| 525 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1_600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self.assertTrue(input_values[0][1_000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1_200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1_400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1_600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self._check_zero_mean_unit_variance(input_values[2][:1_200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2_000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
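# The behaviour these tests pin down, in two lines (constructor arguments are
# the defaults exercised by the tester class; the zero waveforms are illustrative):
#
#   fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True)
#   fe([np.zeros(800), np.zeros(1_200)], padding="longest", return_tensors="np").input_values.shape  # (2, 1200)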
| 525 | 1 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
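# For reference, each record in the biencoder file is shaped roughly like this
# (field names from the DPR data release; values abbreviated):
#
#   {"question": "who sings does he love me with reba",
#    "positive_ctxs": [{"title": "Does He Love You", "text": "..."}, ...],
#    ...}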
| 600 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 600 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : Optional[Any] = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
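# Quick sanity check (a sketch; the small overrides are arbitrary):
#
#   config = TrajectoryTransformerConfig(n_layer=2, n_head=2)
#   config.hidden_size  # 128 -- attribute_map routes hidden_size to n_embd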
| 599 | """simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path',
        type=str,
        default='biencoder-nq-dev.json',
        help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set',
        type=str,
        help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path',
        type=str,
        help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
| 599 | 1 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
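    # For this configuration the script prints 10: with person 0 able to take
    # tasks {1, 3, 4}, person 1 {1, 2, 5} and person 2 {3, 4}, there are exactly
    # ten ways to give every person a distinct task.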
| 225 | from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float')),
                "references": datasets.Sequence(datasets.Value('float')),
            }
        else:
            return {
                "predictions": datasets.Value('float'),
                "references": datasets.Value('float'),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
from __future__ import annotations

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sort ``arr`` by repeatedly extracting ordered strands and merging them into ``solution``."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
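# Note: strand sort is O(n^2) in the worst case, but only O(n) on already-sorted
# input, since the first strand then absorbs the entire list in a single pass.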
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
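# Example: hexagonal_numbers(5) -> [0, 1, 6, 15, 28], since h(n) = n * (2n - 1)
# for n = 0..4.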
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
'''simple docstring'''
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
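# With the DummyObject metaclass, merely instantiating or calling a classmethod on
# this placeholder raises an ImportError naming the missing backends, so users get
# a clear message until transformers, torch and note_seq are all installed.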
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id)

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"` or `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
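# Sanity check with the defaults above: prod([8, 5, 4, 2]) = 320, so
# frame_rate = ceil(24000 / 320) = 75 and num_quantizers = int(1000 * 24.0 // 750) = 32.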
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
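# Usage sketch (hypothetical; assumes a PIL image `img`):
#   processor = CLIPImageProcessor()
#   batch = processor(images=img, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the default crop_size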
import numpy as np


class Cell:
    """A cell in the grid world, carrying A* bookkeeping (g, h, f) and a parent link."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the 8-connected neighbours of ``cell`` that lie inside the world."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from ``start`` to ``goal`` on ``world``; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2  # squared-distance heuristic
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
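# The sys.modules swap above is the standard Hugging Face lazy-import pattern:
# replacing the module object with a _LazyModule defers the heavy torch imports
# until one of the names in _import_structure is actually attribute-accessed.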
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR: the vocab maps single characters to ids."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
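# Usage sketch (assumes a local vocab.json mapping single characters to ids):
#   tok = MgpstrTokenizer(vocab_file="vocab.json")
#   tok.tokenize("abc")  # -> ["a", "b", "c"], via the per-character _tokenize above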
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device('cpu')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Tests FeaturesManager.determine_framework with explicit frameworks, local checkpoints and mocked environments."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
    def _setup_pt_ckpt(self, model_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law, F = k * |q1 * q2| / d^2, and solve for the single
    quantity (force, charge1, charge2 or distance) that is passed in as 0.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
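# Worked example: coulombs_law(0, 3, 5, 2000) solves for the force,
# 8.988e9 * |3 * 5| / 2000**2 = 33705.0 N, i.e. {'force': 33705.0}.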
if __name__ == "__main__":
import doctest
doctest.testmod()
| 457 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    If a `default_config.yaml` file is located in the cache it will temporarily move it
    for the duration of the tests.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case checking that `accelerate tpu-config` builds the expected gcloud command.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , _lowerCamelCase , )
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , _lowerCamelCase , )
def _A ( self: List[Any] ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowerCamelCase )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , _lowerCamelCase , )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , _lowerCamelCase , )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all" , _lowerCamelCase , )
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , _lowerCamelCase , )
def _A ( self: int ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , _lowerCamelCase , )
def _A ( self: Dict ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all" , _lowerCamelCase , )
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all" , _lowerCamelCase , )
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
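
# For illustration: the pattern above defers heavy imports until an attribute
# is first accessed. A minimal stand-alone sketch of the same mechanism using
# PEP 562's module-level __getattr__ (transformers' _LazyModule is more
# elaborate; this belongs in its own module):
import importlib

_lazy_attrs = {"XLMRobertaXLConfig": ".configuration_xlm_roberta_xl"}


def __getattr__(name):
    if name in _lazy_attrs:
        module = importlib.import_module(_lazy_attrs[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")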
| 89 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath) | 8 |
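
if __name__ == "__main__":
    # Round-trip illustration of the YAML front-matter handling above
    # (the README content here is made up):
    demo_readme = "---\npretty_name: Demo\n---\n# Demo dataset\n"
    yaml_block, body = _split_yaml_from_readme(demo_readme)
    assert yaml_block == "pretty_name: Demo"
    meta = DatasetMetadata.from_yaml_string(yaml_block)
    assert meta["pretty_name"] == "Demo"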
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs) | 106 | 0 |
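
if __name__ == "__main__":
    # Illustrative check: the attribute_map above aliases the generic
    # PretrainedConfig names onto the GPT-style ones, so both spellings
    # resolve to the same value.
    config = OpenAIGPTConfig(n_embd=1024)
    assert config.hidden_size == config.n_embd == 1024
    assert config.num_hidden_layers == config.n_layer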
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 715 |
lowerCamelCase ={"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowerCamelCase =["a", "b", "c", "d", "e"]
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : str = start
# add current to visited
visited.append(UpperCamelCase__ )
UpperCamelCase__ : int = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCamelCase__ : int = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
UpperCamelCase__ : Optional[int] = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
lowerCamelCase =topological_sort("a", [], [])
print(sort)
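    # Illustrative sanity check: the function appends each vertex after its
    # neighbors, so the reversed list is a valid topological order in which
    # every edge u -> v places u before v.
    order = list(reversed(sort))
    for u, targets in edges.items():
        for v in targets:
            assert order.index(u) < order.index(v)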
| 462 | 0 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class BlipaVisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipaQFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipaConfig(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: BlipaVisionConfig,
        qformer_config: BlipaQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
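

if __name__ == "__main__":
    # Illustrative composition of the three sub-configs (this assumes the OPT
    # text config is registered in CONFIG_MAPPING, as in transformers):
    vision = BlipaVisionConfig()
    qformer = BlipaQFormerConfig()
    config = BlipaConfig(vision_config=vision.to_dict(), qformer_config=qformer.to_dict())
    # the Q-Former cross-attends into the vision encoder, so its encoder width
    # is tied to the vision hidden size by the constructor above
    assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size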
| 1 |
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
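
# Illustrative check of image_grid with cheap solid-color placeholder tiles:
tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
assert image_grid(tiles, rows=2, cols=2).size == (128, 128)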
| 266 | 0 |
import re


def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
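    # Complementing twice returns the original strand (illustrative check):
    assert dna(dna("ATCGTA")) == "ATCGTA"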
| 701 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 105 | 0 |
def count_inversions_bf(arr):
    """Count inversions by checking every pair: O(n^2)."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer: O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    p, inversion_p = count_inversions_recursive(p)
    q, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(p, q)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists while counting inversions that cross the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)

    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
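    # Cross-check the O(n^2) and O(n log n) counters on a random input
    # (illustrative):
    import random

    sample = [random.randint(0, 100) for _ in range(50)]
    assert count_inversions_bf(sample) == count_inversions_recursive(sample)[1]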
| 311 |
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI Blenderbot checkpoint and re-save it in the transformers layout."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 0 |
from typing import List, Union

import numpy as np

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline


logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.labelaid.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
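

if __name__ == "__main__":
    # Typical usage of the pipeline class defined above (illustrative; mirrors
    # the public transformers API and downloads a model on first run):
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier("I love this movie", candidate_labels=["positive", "negative"])
    print(result["labels"][0], result["scores"][0])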
| 700 |
import inspect
import unittest

from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
    from transformers.models.mobilevitva.modeling_mobilevitva import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
                [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
                [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 96 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
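

# Illustrative early-exit rule that such highway models are used with; the
# threshold value is a hypothetical hyper-parameter, not part of this file.
# Each highway exit is (logits, pooled_output, entropy), matching the indexing
# used in forward() above.
def pick_early_exit(highway_exits, entropy_threshold=0.5):
    # Return the first exit whose prediction entropy is confident enough,
    # falling back to the final layer's logits.
    for logits, _, layer_entropy in highway_exits:
        if layer_entropy < entropy_threshold:
            return logits
    return highway_exits[-1][0]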
| 462 |
def solution():
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000), Project Euler problem 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
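    # Cross-check against the standard library (illustrative):
    import datetime

    count = sum(
        datetime.date(year, month, 1).weekday() == 6  # Monday == 0 ... Sunday == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )
    assert count == solution()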
| 462 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure['modeling_maskformer_swin'] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | 665 | 0 |
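# --- Added usage sketch (not part of the original file) ---
# A minimal, illustrative instantiation of the config defined above; the tiny sizes are
# arbitrary and only meant to show that attribute_map resolves hidden_size -> n_embd.
if __name__ == "__main__":
    tiny_config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
    print(tiny_config.hidden_size)  # prints 128 via the attribute_map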
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext: uppercase letters only, split double letters with X, pad to even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
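# --- Added usage sketch (not part of the original module) ---
# Round-trips an illustrative message; decode() returns the prepared uppercase text
# (spaces stripped, X fillers inserted), not the raw input.
if __name__ == "__main__":
    key = "MONARCHY"
    encoded = encode("HIDE THE GOLD", key)
    print(encoded)
    print(decode(encoded, key))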
| 235 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle, else False."""
    # Keep track of visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
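# --- Added usage sketch (not part of the original module) ---
# The first graph has a back edge 2 -> 0 and is cyclic; the second is acyclic.
if __name__ == "__main__":
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True
    print(check_cycle({0: [1], 1: [2], 2: []}))  # False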
| 235 | 1 |
'''simple docstring'''
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
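# --- Added usage sketch (not part of the original file) ---
# The illustrative grid below contains two 8-connected islands of 1s.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Matrix(3, 4, grid).count_islands())  # 2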
| 539 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 539 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the config
    config = LxmertConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 35 |
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    """Return the length and one longest common subsequence of x and y."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 330 | 0 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
_A = "bert-base-cased"
_A = "google/pegasus-xsum"
_A = [" Sam ate lunch today.", "Sams lunch ingredients."]
_A = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
_A = "patrickvonplaten/t5-tiny-random"
_A = "sshleifer/bart-tiny-random"
_A = "sshleifer/tiny-mbart"
_A = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 703 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) | 228 | 0 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
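# --- Added usage sketch (not part of the original file) ---
# Shows how the derived properties follow from chunk_length_s and overlap; values are illustrative.
if __name__ == "__main__":
    config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
    print(config.chunk_length)  # 1.0 s * 24000 Hz sampling rate = 24000 samples
    print(config.chunk_stride)  # max(1, int((1 - 0.01) * 24000)) = 23760 samples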
| 379 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process using shortest-job-first selection."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to the burst times.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 379 | 1 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to carry out a Diffie-Hellman key exchange over a MODP group."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''')
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('''Invalid public key''')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('''Invalid public key''')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod() | 703 |
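# --- Added usage sketch (not part of the original module) ---
# Two parties exchange public keys and derive the same SHA-256 shared key.
if __name__ == "__main__":
    alice = DiffieHellman()
    bob = DiffieHellman()
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared
    print("shared keys match")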
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py''']
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(F"Found {torch.cuda.device_count()} devices.")
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy()) | 55 | 0 |
def has_unique_chars(input_str: str) -> bool:
    """Return True if every character in input_str occurs only once, using a bitmap."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
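# --- Added usage sketch (not part of the original module) ---
if __name__ == "__main__":
    print(has_unique_chars("abcde"))  # True: no character repeats
    print(has_unique_chars("abcda"))  # False: 'a' appears twice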
| 257 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    """Build a Swin2SRConfig matching the variant encoded in the checkpoint URL."""
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            # split the fused qkv projection into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'''Unexpected key {key} in state_dict''')

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1_024, 1_024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1_024, 1_024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1_024, 1_024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving image processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f'''caidas/{model_name}''')
        processor.push_to_hub(f'''caidas/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 563 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = F"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 1_0) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 1_0) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 716 |
import string
def atbash_slow(sequence: str) -> str:
    output = ''
    for i in sequence:
        extract = ord(i)
        if 6_5 <= extract <= 9_0:
            output += chr(1_5_5 - extract)
        elif 9_7 <= extract <= 1_2_2:
            output += chr(2_1_9 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark both implementations side by side."""
    from timeit import timeit

    print('Running performance benchmarks...')
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(F"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(F"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 262 | 0 |
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('''UNDERFLOW''')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp | 90 |
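# --- Added usage sketch (not part of the original module) ---
# enqueue() returns self, so calls can be chained; values are illustrative.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b")
    print(len(queue), queue.first())  # 2 a
    print(queue.dequeue())  # a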
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co') | 91 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 708 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__a = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 257 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            F""" reinstalling {pkg}."""
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"""
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = F"""\n{hint}""" if hint is not None else ''
    # non-versioned check
    if re.match(R'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                F""" got {requirement}"""
            )
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    F""" but got {requirement}"""
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F"""{requirement}: need one of {list(ops.keys())}, but got {op}""")

    # special case
    if pkg == "python":
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F"""The '{requirement}' distribution was not found and is required by this application. {hint}"""
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def UpperCamelCase ( snake_case__ : List[str] ) -> Optional[Any]:
UpperCamelCase : Any = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(snake_case__ , snake_case__ )
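
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). The requirement
# strings below are illustrative examples only; `packaging` is guaranteed to
# be importable here because the module itself depends on it.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Presence-only check: just verifies that the distribution is installed.
    require_version("packaging")
    # Versioned check: every comma-separated clause must hold.
    require_version("packaging>=20.0,<100.0", hint="pip install -U packaging")
    print("version requirements satisfied")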
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND on 0/1 integers)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
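
# ---------------------------------------------------------------------------
# Editor's sketch (not in the original file): the same tuple-counting trick
# extends to other basic gates. These helpers are illustrative additions.
# ---------------------------------------------------------------------------
def or_gate(input_1: int, input_2: int) -> int:
    # OR is true when at least one input is 1.
    return int((input_1, input_2).count(1) != 0)


def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is the negation of AND: true whenever any input is 0.
    return int((input_1, input_2).count(0) != 0)


assert or_gate(0, 0) == 0 and or_gate(1, 0) == 1
assert nand_gate(1, 1) == 0 and nand_gate(0, 1) == 1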
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Ratio of the given velocity to the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x-axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to a four-vector; defaults to the symbolic (ct, x, y, z)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
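
# ---------------------------------------------------------------------------
# Editor's sketch (not in the original file): a quick numeric check that the
# boost above preserves the Minkowski interval ct^2 - x^2 - y^2 - z^2, which
# is the defining property of a Lorentz transformation.
# ---------------------------------------------------------------------------
def _minkowski_interval(v: np.ndarray) -> float:
    return float(v[0] ** 2 - v[1] ** 2 - v[2] ** 2 - v[3] ** 2)


_event = np.array([2.0, 1.0, 1.0, 1.0])  # arbitrary numeric four-vector (ct, x, y, z)
_boosted = transformation_matrix(0.5 * c) @ _event
assert abs(_minkowski_interval(_event) - _minkowski_interval(_boosted)) < 1e-6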
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-2 tokenizer backed by HuggingFace's `tokenizers` library (byte-level BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # rebuild the pre-tokenizer if the stored add_prefix_space setting differs
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate the conversation turns, appending EOS after each turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
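
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module; this file uses
# relative imports, so the demo below belongs in user code, not here):
#
#     from transformers import GPT2TokenizerFast
#
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     # add_prefix_space=True is required before passing pre-tokenized input,
#     # matching the assertion in _batch_encode_plus/_encode_plus above.
#     enc = tok(["Hello", "world"], is_split_into_words=True)
#     print(enc.input_ids)
# ---------------------------------------------------------------------------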
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker run by each process: holds one value and swaps it with neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
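
# ---------------------------------------------------------------------------
# Editor's sketch (not in the original file): the same algorithm without
# processes, for comparison. Each "phase" compares disjoint adjacent pairs,
# which is exactly what the processes above do in parallel.
# ---------------------------------------------------------------------------
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        start = phase % 2  # even phases compare (0,1),(2,3)...; odd phases (1,2),(3,4)...
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))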
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
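
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). Running it needs a
# GPU plus the diffusers/transformers stack; "runwayml/stable-diffusion-v1-5"
# is used purely as an example checkpoint.
#
#     pipe = SeedResizeStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe = pipe.to("cuda")
#     generator = torch.Generator(device="cuda").manual_seed(0)
#     small = pipe("a red fox in the snow", height=512, width=512, generator=generator).images[0]
#     generator = torch.Generator(device="cuda").manual_seed(0)
#     # Same seed at a different size should yield a visually similar composition,
#     # because the 64x64 reference latents are copied into the larger latent grid.
#     wide = pipe("a red fox in the snow", height=512, width=768, generator=generator).images[0]
# ---------------------------------------------------------------------------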
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
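
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a minimal illustration of
# the lazy-import idea used above, independent of transformers. A module
# object whose attribute lookups trigger the real import on first access.
# `_TinyLazyModule` and its import_map are hypothetical toy names.
# ---------------------------------------------------------------------------
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    """Toy stand-in for _LazyModule: resolves attributes on first use."""

    def __init__(self, name: str, import_map: dict):
        super().__init__(name)
        self._import_map = import_map  # attribute name -> module path

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._import_map[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for later lookups
        return value


_demo = _TinyLazyModule("demo", {"sqrt": "math"})
assert _demo.sqrt(9) == 3.0  # "math" was imported only at this point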
def prefix_function(input_string: str) -> list:
    """For each position i, the length of the longest proper prefix of
    input_string[: i + 1] that is also a suffix of it (the KMP failure function)."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Length of the longest proper prefix that reappears as a suffix somewhere in the string."""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
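
# ---------------------------------------------------------------------------
# Editor's sketch (not in the original file): the classic application of the
# prefix function - Knuth-Morris-Pratt substring search. We run the prefix
# function over "pattern + separator + text"; any position whose value equals
# len(pattern) marks the end of a match.
# ---------------------------------------------------------------------------
def kmp_find(pattern: str, text: str) -> int:
    """Return the index of the first occurrence of pattern in text, or -1."""
    if not pattern:
        return 0
    combined = pattern + "\x00" + text  # separator must not occur in either string
    pi = prefix_function(combined)
    for i in range(len(pattern) + 1, len(combined)):
        if pi[i] == len(pattern):
            return i - 2 * len(pattern)  # start index of the match in `text`
    return -1


assert kmp_find("aba", "cabababd") == 1
assert kmp_find("xyz", "abc") == -1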
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
        'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ReformerAttention',
        'ReformerForMaskedLM',
        'ReformerForQuestionAnswering',
        'ReformerForSequenceClassification',
        'ReformerLayer',
        'ReformerModel',
        'ReformerModelWithLMHead',
        'ReformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
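
# ---------------------------------------------------------------------------
# Editor's sketch (not in the original file): a bottom-up variant that also
# records *which* first cut is optimal for every length, so the actual list
# of pieces can be reconstructed, not just the maximum revenue.
# ---------------------------------------------------------------------------
def bottom_up_cut_rod_with_cuts(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [0] * (n + 1)
    first_cut = [0] * (n + 1)
    for i in range(1, n + 1):
        best = float("-inf")
        for j in range(1, i + 1):
            if prices[j - 1] + max_rev[i - j] > best:
                best = prices[j - 1] + max_rev[i - j]
                first_cut[i] = j
        max_rev[i] = best
    # walk the first_cut table to list the piece lengths
    pieces, remaining = [], n
    while remaining > 0:
        pieces.append(first_cut[remaining])
        remaining -= first_cut[remaining]
    return max_rev[n], pieces


assert bottom_up_cut_rod_with_cuts(6, [6, 10, 12, 15, 20, 23]) == (36, [1] * 6)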
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_convert_rgb'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 224, 'width': 224})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_convert_rgb'))
    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
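    # Note: with num_channels=4 above, the inputs are RGBA; the encoded
    # output is still expected to have 3 channels (see
    # expected_encoded_image_num_channels), which appears to exercise the
    # processor's do_convert_rgb path.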
| 192 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)
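# Project Euler problem 5: the smallest positive number evenly divisible by
# every integer from 1 to n is lcm(1, 2, ..., n), computed by folding lcm
# over the range below. For example, solution(10) == 2520.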
def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f"""{solution() = }""")
| 715 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
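    # The tests below round-trip the processor through save_pretrained /
    # from_pretrained and check that it stays consistent with its underlying
    # GPT2 tokenizer and BLIP image processor components.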
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
| 345 | 0 |
import argparse
import os
import re
import packaging.version
snake_case = """examples/"""
snake_case = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
snake_case = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
snake_case = """README.md"""
def update_version_in_file(fname, version, pattern):
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects')
        if "legacy" in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc',
                'https://huggingface.co/docs/diffusers/model_doc',
            )
        index += 1
    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
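# Example of the version bumps performed below, assuming the current
# __init__ holds __version__ = "0.18.2.dev0": a normal release proposes
# "0.18.2", a patch of "0.18.2" proposes "0.18.3", and post-release work
# proposes "0.19.0.dev0".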
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work() | 67 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
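# Worst case is O(len(s) * len(pattern)) character comparisons; a linear-time
# algorithm such as Knuth-Morris-Pratt is the usual upgrade for large inputs.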
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.')
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
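# Example: the default attention_types=[[["global", "local"], 12]] expands to
# ["global", "local"] repeated 12 times, i.e. 24 entries, one per layer.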
def custom_unfold(input, dimension, size, step):
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
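# Both helpers above appear to mirror torch.Tensor.unfold and the block-length
# computation used by GPT-Neo local attention, rewritten with primitives that
# trace cleanly for ONNX export; the names follow the upstream helpers.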
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
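# Quick sanity sketch (values follow from the config defaults above): the
# default config expands to 24 alternating global/local attention layers,
# matching num_layers.
#
#   config = GPTNeoConfig()
#   assert len(config.attention_layers) == config.num_layers == 24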
| 421 |
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
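# Each guarded block below follows the same pattern: probe for an optional
# dependency and, when it is missing, import dummy placeholder objects that
# raise an informative error on first use, so `import diffusers` never fails
# outright.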
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 421 | 1 |