code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="tf" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
UpperCAmelCase_ = model(__a , labels=__a ).loss
UpperCAmelCase_ = -tf.math.reduce_mean(__a ).numpy()
UpperCAmelCase_ = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 78 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ):
_lowerCAmelCase :Optional[int] = parent
_lowerCAmelCase :Dict = batch_size
_lowerCAmelCase :Optional[Any] = image_size
_lowerCAmelCase :Optional[Any] = patch_size
_lowerCAmelCase :List[Any] = num_channels
_lowerCAmelCase :Optional[int] = embed_dim
_lowerCAmelCase :List[str] = hidden_sizes
_lowerCAmelCase :Union[str, Any] = depths
_lowerCAmelCase :int = num_heads
_lowerCAmelCase :Any = window_size
_lowerCAmelCase :List[Any] = mlp_ratio
_lowerCAmelCase :Optional[int] = qkv_bias
_lowerCAmelCase :Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase :Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase :Dict = drop_path_rate
_lowerCAmelCase :List[Any] = hidden_act
_lowerCAmelCase :Tuple = use_absolute_embeddings
_lowerCAmelCase :Optional[int] = patch_norm
_lowerCAmelCase :Optional[Any] = layer_norm_eps
_lowerCAmelCase :Union[str, Any] = initializer_range
_lowerCAmelCase :List[str] = is_training
_lowerCAmelCase :str = scope
_lowerCAmelCase :Optional[int] = use_labels
_lowerCAmelCase :List[Any] = type_sequence_label_size
_lowerCAmelCase :Union[str, Any] = encoder_stride
_lowerCAmelCase :Optional[int] = out_features
_lowerCAmelCase :List[str] = out_indices
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase :Dict = None
if self.use_labels:
_lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase :str = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self: int ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ):
_lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase :Optional[int] = None
_lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :str = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :int = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase :Optional[int] = 1
_lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase :List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs
_lowerCAmelCase :List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = False
lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = FocalNetModelTester(self )
_lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self: str ):
pass
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Tuple = model_class(_UpperCAmelCase )
_lowerCAmelCase :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase :int = [*signature.parameters.keys()]
_lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ):
_lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_lowerCAmelCase :List[Any] = outputs.hidden_states
_lowerCAmelCase :str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# FocalNet has a different seq_length
_lowerCAmelCase :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase :List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape
_lowerCAmelCase :Optional[int] = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Dict = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :str = 3
_lowerCAmelCase :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase :int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase :List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase :Union[str, Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: int ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase :str = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase )
_lowerCAmelCase :Union[str, Any] = self.default_image_processor
_lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase :Dict = model(**_UpperCAmelCase )
# verify the logits
_lowerCAmelCase :str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase : str = FocalNetConfig
lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Any = FocalNetModelTester(self ) | 687 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_a = logging.get_logger(__name__)
_a = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'van'
def __init__( self , __a=2_24 , __a=3 , __a=[7, 3, 3, 3] , __a=[4, 2, 2, 2] , __a=[64, 1_28, 3_20, 5_12] , __a=[3, 3, 12, 3] , __a=[8, 8, 4, 4] , __a="gelu" , __a=0.02 , __a=1e-6 , __a=1e-2 , __a=0.0 , __a=0.0 , **__a , ) -> Dict:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = image_size
_UpperCamelCase = num_channels
_UpperCamelCase = patch_sizes
_UpperCamelCase = strides
_UpperCamelCase = hidden_sizes
_UpperCamelCase = depths
_UpperCamelCase = mlp_ratios
_UpperCamelCase = hidden_act
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = layer_scale_init_value
_UpperCamelCase = drop_path_rate
_UpperCamelCase = dropout_rate
| 78 |
"""simple docstring"""
from math import sqrt
def lowerCamelCase__ ( __snake_case ) -> bool:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
_UpperCamelCase = True
# 0 and 1 are none primes.
if number <= 1:
_UpperCamelCase = False
for divisor in range(2, int(round(sqrt(__snake_case ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
_UpperCamelCase = False
break
# precondition
assert isinstance(__snake_case, __snake_case ), "'status' must been from type bool"
return status
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_UpperCamelCase = list(range(2, n + 1 ) )
_UpperCamelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__snake_case ) ):
for j in range(i + 1, len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_UpperCamelCase = 0
# filters actual prime numbers.
_UpperCamelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list"
return ans
def lowerCamelCase__ ( __snake_case ) -> List[str]:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (n > 2), "'N' must been an int and > 2"
_UpperCamelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2, n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list"
return ans
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and number >= 0, "'number' must been an int and >= 0"
_UpperCamelCase = [] # this list will be returns of the function.
# potential prime number factors.
_UpperCamelCase = 2
_UpperCamelCase = number
if number == 0 or number == 1:
ans.append(__snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list"
return ans
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCamelCase = 0
# prime factorization of 'number'
_UpperCamelCase = prime_factorization(__snake_case )
_UpperCamelCase = max(__snake_case )
# precondition
assert isinstance(__snake_case, __snake_case ), "'ans' must been from type int"
return ans
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCamelCase = 0
# prime factorization of 'number'
_UpperCamelCase = prime_factorization(__snake_case )
_UpperCamelCase = min(__snake_case )
# precondition
assert isinstance(__snake_case, __snake_case ), "'ans' must been from type int"
return ans
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0, __snake_case ), "compare bust been from type bool"
return number % 2 == 0
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ), "'number' must been an int"
assert isinstance(number % 2 != 0, __snake_case ), "compare bust been from type bool"
return number % 2 != 0
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
assert (
isinstance(__snake_case, __snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
_UpperCamelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
_UpperCamelCase = get_prime_numbers(__snake_case )
_UpperCamelCase = len(__snake_case )
# run variable for while-loops.
_UpperCamelCase = 0
_UpperCamelCase = None
# exit variable. for break up the loops
_UpperCamelCase = True
while i < len_pn and loop:
_UpperCamelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_UpperCamelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case, __snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCamelCase__ ( __snake_case, __snake_case ) -> str:
"""simple docstring"""
assert (
isinstance(__snake_case, __snake_case )
and isinstance(__snake_case, __snake_case )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_UpperCamelCase = 0
while numbera != 0:
_UpperCamelCase = numbera % numbera
_UpperCamelCase = numbera
_UpperCamelCase = rest
# precondition
assert isinstance(__snake_case, __snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
assert (
isinstance(__snake_case, __snake_case )
and isinstance(__snake_case, __snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_UpperCamelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_UpperCamelCase = prime_factorization(__snake_case )
_UpperCamelCase = prime_factorization(__snake_case )
elif numbera == 1 or numbera == 1:
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = max(__snake_case, __snake_case )
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_UpperCamelCase = prime_fac_a.count(__snake_case )
_UpperCamelCase = prime_fac_a.count(__snake_case )
for _ in range(max(__snake_case, __snake_case ) ):
ans *= n
else:
_UpperCamelCase = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_UpperCamelCase = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# precondition
assert isinstance(__snake_case, __snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'number' must been a positive int"
_UpperCamelCase = 0
_UpperCamelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case, __snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Tuple:
"""simple docstring"""
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_UpperCamelCase = p_number_a + 1 # jump to the next number
_UpperCamelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case, __snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCamelCase__ ( __snake_case ) -> List[Any]:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (n >= 1), "'n' must been int and >= 1"
_UpperCamelCase = [] # will be returned.
for divisor in range(1, n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
_UpperCamelCase = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case, __snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(__snake_case, __snake_case )
and isinstance(__snake_case, __snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_UpperCamelCase = gcd(abs(__snake_case ), abs(__snake_case ) )
# precondition
assert (
isinstance(__snake_case, __snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'n' must been a int and >= 0"
_UpperCamelCase = 1 # this will be return.
for factor in range(1, n + 1 ):
ans *= factor
return ans
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'n' must been an int and >= 0"
_UpperCamelCase = 0
_UpperCamelCase = 1
_UpperCamelCase = 1 # this will be return
for _ in range(n - 1 ):
_UpperCamelCase = ans
ans += fiba
_UpperCamelCase = tmp
return ans
| 78 | 1 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a ( lowercase__ : NDArray[floataa] , lowercase__ : NDArray[floataa] , lowercase__ : list[int] , lowercase__ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = coefficient_matrix.shape
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = constant_matrix.shape
if rowsa != colsa:
SCREAMING_SNAKE_CASE__ : int = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(lowercase__ )
if colsa != 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(lowercase__ )
if rowsa != rowsa:
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
f'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(lowercase__ )
if len(lowercase__ ) != rowsa:
SCREAMING_SNAKE_CASE__ : int = (
'Number of initial values must be equal to number of rows in coefficient '
f'''matrix but received {len(lowercase__ )} and {rowsa}'''
)
raise ValueError(lowercase__ )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
SCREAMING_SNAKE_CASE__ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = table.shape
strictly_diagonally_dominant(lowercase__ )
# Iterates the whole matrix for given number of times
for _ in range(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Tuple = []
for row in range(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Dict = 0
for col in range(lowercase__ ):
if col == row:
SCREAMING_SNAKE_CASE__ : Any = table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE__ : Dict = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE__ : Tuple = (temp + val) / denom
new_val.append(lowercase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_val
return [float(lowercase__ ) for i in new_val]
def _a ( lowercase__ : NDArray[floataa] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = table.shape
SCREAMING_SNAKE_CASE__ : List[Any] = True
for i in range(0 , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = ['pixel_values']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None:
'''simple docstring'''
super().__init__(**lowercase )
A__ = size if size is not None else {"shortest_edge": 224}
A__ = get_size_dict(lowercase , default_to_square=lowercase )
A__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
A__ = get_size_dict(lowercase , default_to_square=lowercase , param_name="crop_size" )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A__ = get_resize_output_image_size(lowercase , size=size["shortest_edge"] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> List[str]:
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(lowercase , param_name="size" , default_to_square=lowercase )
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(lowercase , param_name="crop_size" , default_to_square=lowercase )
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A__ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A__ = {"pixel_values": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 712 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: str=1_0_2_4 ) -> Any:
'''simple docstring'''
A__ , A__ = [], []
A__ = list(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
A__ , A__ = sorted_examples[0]
def is_too_big(SCREAMING_SNAKE_CASE_: List[str] ):
return tok(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
A__ = new_src + " " + src
A__ = new_tgt + " " + tgt
if is_too_big(SCREAMING_SNAKE_CASE_ ) or is_too_big(SCREAMING_SNAKE_CASE_ ): # cant fit, finalize example
finished_src.append(SCREAMING_SNAKE_CASE_ )
finished_tgt.append(SCREAMING_SNAKE_CASE_ )
A__ , A__ = src, tgt
else: # can fit, keep adding
A__ , A__ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(SCREAMING_SNAKE_CASE_ )
finished_tgt.append(SCREAMING_SNAKE_CASE_ )
return finished_src, finished_tgt
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ = Path(SCREAMING_SNAKE_CASE_ )
save_path.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
for split in ["train"]:
A__ , A__ = data_dir / F'{split}.source', data_dir / F'{split}.target'
A__ = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE_ ).open().readlines()]
A__ = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE_ ).open().readlines()]
A__ , A__ = pack_examples(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'packed {split} split from {len(SCREAMING_SNAKE_CASE_ )} examples -> {len(SCREAMING_SNAKE_CASE_ )}.' )
Path(save_path / F'{split}.source' ).open("w" ).write("\n".join(SCREAMING_SNAKE_CASE_ ) )
Path(save_path / F'{split}.target' ).open("w" ).write("\n".join(SCREAMING_SNAKE_CASE_ ) )
for split in ["val", "test"]:
A__ , A__ = data_dir / F'{split}.source', data_dir / F'{split}.target'
shutil.copyfile(SCREAMING_SNAKE_CASE_ , save_path / F'{split}.source' )
shutil.copyfile(SCREAMING_SNAKE_CASE_ , save_path / F'{split}.target' )
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
A__ = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=SCREAMING_SNAKE_CASE_ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=SCREAMING_SNAKE_CASE_ , default=1_2_8 )
parser.add_argument("--data_dir" , type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("--save_path" , type=SCREAMING_SNAKE_CASE_ )
A__ = parser.parse_args()
A__ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(SCREAMING_SNAKE_CASE_ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 626 | 0 |
from numpy import exp, pi, sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : float = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _A ( _a : Optional[Any] , _a : Optional[Any] ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A = flax_key_tuple[:-1] + ("""weight""",)
A = torch.permute(_a , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_a ):
# linear layer
A = flax_key_tuple[:-1] + ("""weight""",)
A = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def _A ( _a : Tuple , _a : Any , _a : Any ):
"""simple docstring"""
if "metadata" in layer:
A = layer.split("""metadata""" )
A = """""".join(split_layer[0] )[:-1]
A = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
A = layer.split("""kvstore""" )
A = """""".join(split_layer[0] )[:-1]
A = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
A = layer.split("""/""" )
A = """/""".join(split_layer[:-1] )
A = (split_layer[-1],)
if "kvstore/path" in layer:
A = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
A = """file"""
else:
A = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _A ( _a : Optional[Any] , _a : Optional[int] ):
"""simple docstring"""
A = rename_keys(_a )
A = {}
for k, v in current_block.items():
A = v
A = new_current_block
torch.save(_a , _a )
def _A ( _a : str , _a : Optional[Any] , _a : int , _a : Optional[int] , _a : str = WEIGHTS_NAME ):
"""simple docstring"""
A = convert_file_size_to_int(_a )
A = []
A = {}
A = 0
A = 0
os.makedirs(_a , exist_ok=_a )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
A = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
A = flatten_dict(_a , sep="""/""" )
A = {}
for layer in checkpoint_info.keys():
A , A , A = get_key_and_tensorstore_dict(
_a , _a , _a )
if curr_real_layer_name in all_layers:
A = content
else:
A = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A = torch.tensor(_a )
A = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A , A = rename_base_flax_keys(tuple(key.split("""/""" ) ) , _a )
A = """/""".join(_a )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A = os.path.join(
_a , weights_name.replace(""".bin""" , f'-{len(_a )+1:05d}-of-???.bin' ) )
rename_and_save_block(_a , _a )
sharded_state_dicts.append(current_block.keys() )
del current_block
A = {}
A = 0
A = raw_weights.to(getattr(_a , _a ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A = os.path.join(_a , weights_name.replace(""".bin""" , f'-{len(_a )+1:05d}-of-???.bin' ) )
rename_and_save_block(_a , _a )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_a ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A = {}
A = {}
for idx, shard in enumerate(_a ):
A = weights_name.replace(
""".bin""" , f'-{idx+1:05d}-of-{len(_a ):05d}.bin' ) # len(sharded_state_dicts):05d}
A = os.path.join(_a , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(_a , os.path.join(_a , _a ) )
A = shard
for key in shard:
A = shard_file
# Add the metadata
A = {"""total_size""": total_size}
A = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_a , _a ) , """w""" , encoding="""utf-8""" ) as f:
A = json.dumps(_a , indent=2 , sort_keys=_a ) + """\n"""
f.write(_a )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase =parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _A ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
A = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
A = TaTokenizer.from_pretrained("""t5-small""" )
A = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
A = tokenizer(_a , return_tensors="""pt""" ).input_ids
A = model.generate(_a , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 617 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__snake_case ) , """Tatoeba directory does not exist.""" )
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__UpperCamelCase )
@slow
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=__UpperCamelCase )
assert mmeta["long_pair"] == "heb-eng"
| 701 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 0 |
import qiskit
def __UpperCAmelCase( lowercase_ , lowercase_ ):
_lowerCamelCase : Dict = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
_lowerCamelCase : List[Any] = qiskit.QuantumCircuit(lowercase_ , lowercase_ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_lowerCamelCase : List[Any] = qiskit.execute(lowercase_ , lowercase_ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase_ )
if __name__ == "__main__":
_lowerCamelCase = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
| 114 |
import os
def __UpperCAmelCase( ):
with open(os.path.dirname(lowercase_ ) + '''/p022_names.txt''' ) as file:
_lowerCamelCase : Optional[int] = str(file.readlines()[0] )
_lowerCamelCase : List[Any] = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : Tuple = 0
for i, name in enumerate(lowercase_ ):
for letter in name:
name_score += ord(lowercase_ ) - 64
total_score += (i + 1) * name_score
_lowerCamelCase : Optional[int] = 0
return total_score
if __name__ == "__main__":
print(solution())
| 114 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,lowercase__ : List[str] ,lowercase__ : Any=7 ,lowercase__ : Optional[Any]=3 ,lowercase__ : Optional[Any]=1_0 ,lowercase__ : Any=1_8 ,lowercase__ : Optional[int]=3_0 ,lowercase__ : List[str]=4_0_0 ,lowercase__ : Optional[int]=True ,lowercase__ : str=None ,lowercase__ : Dict=True ,lowercase__ : int=[0.5, 0.5, 0.5] ,lowercase__ : List[str]=[0.5, 0.5, 0.5] ,lowercase__ : Tuple=None ,):
__lowercase = size if size is not None else {'shortest_edge': 1_8}
__lowercase = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = num_frames
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
__lowercase = crop_size
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ (__lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = VivitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = VivitImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ ,'''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,'''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,'''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,'''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,'''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,'''size''' ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size ,{'''height''': 1_8, '''width''': 1_8} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__lowercase = prepare_video_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ )
for video in video_inputs:
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertIsInstance(video[0] ,Image.Image )
# Test not batched input
__lowercase = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(UpperCAmelCase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_video_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,numpify=UpperCAmelCase_ )
for video in video_inputs:
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertIsInstance(video[0] ,np.ndarray )
# Test not batched input
__lowercase = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(UpperCAmelCase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_video_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,torchify=UpperCAmelCase_ )
for video in video_inputs:
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertIsInstance(video[0] ,torch.Tensor )
# Test not batched input
__lowercase = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
__lowercase = image_processing(UpperCAmelCase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
| 706 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _A ( A__ ):
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE ( lowercase__ : ArgumentParser ):
__lowercase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' ,type=lowercase__ ,default=lowercase__ ,help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' ,action='''store_true''' ,help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' ,action='''store_true''' ,help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' ,)
download_parser.add_argument('''model''' ,type=lowercase__ ,help='''Name of the model to download''' )
download_parser.set_defaults(func=lowercase__ )
def __init__( self : str ,lowercase__ : str ,lowercase__ : str ,lowercase__ : bool ,lowercase__ : bool ):
__lowercase = model
__lowercase = cache
__lowercase = force
__lowercase = trust_remote_code
def SCREAMING_SNAKE_CASE ( self : Any ):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
| 624 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase=False , _lowercase=False , _lowercase=6.0 , _lowercase=None , _lowercase=False , _lowercase=False , _lowercase=None , _lowercase="fp4" , _lowercase=False , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = load_in_abit
_lowerCAmelCase = load_in_abit
_lowerCAmelCase = llm_inta_threshold
_lowerCAmelCase = llm_inta_skip_modules
_lowerCAmelCase = llm_inta_enable_fpaa_cpu_offload
_lowerCAmelCase = llm_inta_has_fpaa_weight
_lowerCAmelCase = bnb_abit_quant_type
_lowerCAmelCase = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
_lowerCAmelCase = torch.floataa
elif isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = getattr(_lowercase , _lowercase )
elif isinstance(_lowercase , torch.dtype ):
_lowerCAmelCase = bnb_abit_compute_dtype
else:
raise ValueError("""bnb_4bit_compute_dtype must be a string or a torch.dtype""" )
self.post_init()
def _lowercase ( self ):
"""simple docstring"""
if not isinstance(self.llm_inta_threshold , _lowercase ):
raise ValueError("""llm_int8_threshold must be a float""" )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _lowercase ):
raise ValueError("""llm_int8_skip_modules must be a list of strings""" )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _lowercase ):
raise ValueError("""llm_int8_enable_fp32_cpu_offload must be a boolean""" )
if not isinstance(self.llm_inta_has_fpaa_weight , _lowercase ):
raise ValueError("""llm_int8_has_fp16_weight must be a boolean""" )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError("""bnb_4bit_compute_dtype must be torch.dtype""" )
if not isinstance(self.bnb_abit_quant_type , _lowercase ):
raise ValueError("""bnb_4bit_quant_type must be a string""" )
if not isinstance(self.bnb_abit_use_double_quant , _lowercase ):
raise ValueError("""bnb_4bit_use_double_quant must be a boolean""" )
if self.load_in_abit and not version.parse(importlib.metadata.version("""bitsandbytes""" ) ) >= version.parse(
"""0.39.0""" ):
raise ValueError(
"""4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version""" )
def _lowercase ( self ):
"""simple docstring"""
return self.load_in_abit or self.load_in_abit
def _lowercase ( self ):
"""simple docstring"""
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _lowercase ( cls , _lowercase , _lowercase , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = cls(**_lowercase )
_lowerCAmelCase = []
for key, value in kwargs.items():
if hasattr(_lowercase , _lowercase ):
setattr(_lowercase , _lowercase , _lowercase )
to_remove.append(_lowercase )
for key in to_remove:
kwargs.pop(_lowercase , _lowercase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _lowercase ( self , _lowercase ):
"""simple docstring"""
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
_lowerCAmelCase = self.to_dict()
_lowerCAmelCase = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
writer.write(_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = copy.deepcopy(self.__dict__ )
_lowerCAmelCase = str(output["""bnb_4bit_compute_dtype"""] ).split(""".""" )[1]
return output
def __repr__( self ):
"""simple docstring"""
return F'{self.__class__.__name__} {self.to_json_string()}'
def _lowercase ( self , _lowercase = True ):
"""simple docstring"""
if use_diff is True:
_lowerCAmelCase = self.to_diff_dict()
else:
_lowerCAmelCase = self.to_dict()
return json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + "\n"
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.to_dict()
# get the default config dict
_lowerCAmelCase = BitsAndBytesConfig().to_dict()
_lowerCAmelCase = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
_lowerCAmelCase = value
return serializable_config_dict
| 5 |
def lowerCAmelCase_ ( __UpperCAmelCase: float ) -> float:
return 10 - x * x
def lowerCAmelCase_ ( __UpperCAmelCase: float , __UpperCAmelCase: float ) -> float:
# Bolzano theory in order to find if there is a root between a and b
if equation(__UpperCAmelCase ) * equation(__UpperCAmelCase ) >= 0:
raise ValueError('''Wrong space!''' )
UpperCamelCase__ : Optional[int] = a
while (b - a) >= 0.01:
# Find middle point
UpperCamelCase__ : int = (a + b) / 2
# Check if middle point is root
if equation(__UpperCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__UpperCAmelCase ) * equation(__UpperCAmelCase ) < 0:
UpperCamelCase__ : str = c
else:
UpperCamelCase__ : Optional[int] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 253 | 0 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__lowercase : List[Any] = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__lowercase : int = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def SCREAMING_SNAKE_CASE ( snake_case):
__snake_case = numpy.dtype(numpy.uintaa).newbyteorder('''>''')
return numpy.frombuffer(bytestream.read(4), dtype=snake_case)[0]
@deprecated(snake_case, '''Please use tf.data to implement this functionality.''')
def SCREAMING_SNAKE_CASE ( snake_case):
print('''Extracting''', f.name)
with gzip.GzipFile(fileobj=snake_case) as bytestream:
__snake_case = _readaa(snake_case)
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name))
__snake_case = _readaa(snake_case)
__snake_case = _readaa(snake_case)
__snake_case = _readaa(snake_case)
__snake_case = bytestream.read(rows * cols * num_images)
__snake_case = numpy.frombuffer(snake_case, dtype=numpy.uinta)
__snake_case = data.reshape(snake_case, snake_case, snake_case, 1)
return data
@deprecated(snake_case, '''Please use tf.one_hot on tensors.''')
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
__snake_case = labels_dense.shape[0]
__snake_case = numpy.arange(snake_case) * num_classes
__snake_case = numpy.zeros((num_labels, num_classes))
__snake_case = 1
return labels_one_hot
@deprecated(snake_case, '''Please use tf.data to implement this functionality.''')
def SCREAMING_SNAKE_CASE ( snake_case, snake_case=False, snake_case=10):
print('''Extracting''', f.name)
with gzip.GzipFile(fileobj=snake_case) as bytestream:
__snake_case = _readaa(snake_case)
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name))
__snake_case = _readaa(snake_case)
__snake_case = bytestream.read(snake_case)
__snake_case = numpy.frombuffer(snake_case, dtype=numpy.uinta)
if one_hot:
return _dense_to_one_hot(snake_case, snake_case)
return labels
class _A :
"""simple docstring"""
@deprecated(
A_ , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : List[str] , A_ : int , A_ : Tuple , A_ : Optional[int]=False , A_ : int=False , A_ : Dict=dtypes.floataa , A_ : Tuple=True , A_ : Any=None , ) -> Any:
__snake_case , __snake_case = random_seed.get_seed(A_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__snake_case = dtypes.as_dtype(A_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
__snake_case = 10_000
__snake_case = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"images.shape: {images.shape} labels.shape: {labels.shape}"
__snake_case = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__snake_case = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__snake_case = images.astype(numpy.floataa )
__snake_case = numpy.multiply(A_ , 1.0 / 255.0 )
__snake_case = images
__snake_case = labels
__snake_case = 0
__snake_case = 0
@property
def lowercase ( self : int ) -> Dict:
return self._images
@property
def lowercase ( self : Dict ) -> Optional[int]:
return self._labels
@property
def lowercase ( self : int ) -> Tuple:
return self._num_examples
@property
def lowercase ( self : int ) -> Tuple:
return self._epochs_completed
def lowercase ( self : int , A_ : Optional[int] , A_ : Tuple=False , A_ : Tuple=True ) -> List[Any]:
if fake_data:
__snake_case = [1] * 784
__snake_case = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A_ )],
[fake_label for _ in range(A_ )],
)
__snake_case = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__snake_case = numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__snake_case = self.images[perma]
__snake_case = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__snake_case = self._num_examples - start
__snake_case = self._images[start : self._num_examples]
__snake_case = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__snake_case = numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__snake_case = self.images[perm]
__snake_case = self.labels[perm]
# Start next epoch
__snake_case = 0
__snake_case = batch_size - rest_num_examples
__snake_case = self._index_in_epoch
__snake_case = self._images[start:end]
__snake_case = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__snake_case = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(snake_case, '''Please write your own downloading logic.''')
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case):
if not gfile.Exists(snake_case):
gfile.MakeDirs(snake_case)
__snake_case = os.path.join(snake_case, snake_case)
if not gfile.Exists(snake_case):
urllib.request.urlretrieve(snake_case, snake_case) # noqa: S310
with gfile.GFile(snake_case) as f:
__snake_case = f.size()
print('''Successfully downloaded''', snake_case, snake_case, '''bytes.''')
return filepath
@deprecated(
snake_case, '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''')
def SCREAMING_SNAKE_CASE ( snake_case, snake_case=False, snake_case=False, snake_case=dtypes.floataa, snake_case=True, snake_case=50_00, snake_case=None, snake_case=DEFAULT_SOURCE_URL, ):
if fake_data:
def fake():
return _DataSet(
[], [], fake_data=snake_case, one_hot=snake_case, dtype=snake_case, seed=snake_case)
__snake_case = fake()
__snake_case = fake()
__snake_case = fake()
return _Datasets(train=snake_case, validation=snake_case, test=snake_case)
if not source_url: # empty string check
__snake_case = DEFAULT_SOURCE_URL
__snake_case = '''train-images-idx3-ubyte.gz'''
__snake_case = '''train-labels-idx1-ubyte.gz'''
__snake_case = '''t10k-images-idx3-ubyte.gz'''
__snake_case = '''t10k-labels-idx1-ubyte.gz'''
__snake_case = _maybe_download(
snake_case, snake_case, source_url + train_images_file)
with gfile.Open(snake_case, '''rb''') as f:
__snake_case = _extract_images(snake_case)
__snake_case = _maybe_download(
snake_case, snake_case, source_url + train_labels_file)
with gfile.Open(snake_case, '''rb''') as f:
__snake_case = _extract_labels(snake_case, one_hot=snake_case)
__snake_case = _maybe_download(
snake_case, snake_case, source_url + test_images_file)
with gfile.Open(snake_case, '''rb''') as f:
__snake_case = _extract_images(snake_case)
__snake_case = _maybe_download(
snake_case, snake_case, source_url + test_labels_file)
with gfile.Open(snake_case, '''rb''') as f:
__snake_case = _extract_labels(snake_case, one_hot=snake_case)
if not 0 <= validation_size <= len(snake_case):
__snake_case = (
'''Validation size should be between 0 and '''
f"{len(snake_case)}. Received: {validation_size}."
)
raise ValueError(snake_case)
__snake_case = train_images[:validation_size]
__snake_case = train_labels[:validation_size]
__snake_case = train_images[validation_size:]
__snake_case = train_labels[validation_size:]
__snake_case = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
__snake_case = _DataSet(snake_case, snake_case, **snake_case)
__snake_case = _DataSet(snake_case, snake_case, **snake_case)
__snake_case = _DataSet(snake_case, snake_case, **snake_case)
return _Datasets(train=snake_case, validation=snake_case, test=snake_case) | 700 | """simple docstring"""
def SCREAMING_SNAKE_CASE ( snake_case):
__snake_case = len(snake_case)
for i in range(length - 1):
__snake_case = i
for k in range(i + 1, snake_case):
if collection[k] < collection[least]:
__snake_case = k
if least != i:
__snake_case , __snake_case = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__lowercase : Optional[Any] = input("Enter numbers separated by a comma:\n").strip()
__lowercase : Tuple = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted)) | 93 | 0 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase : str = logging.getLogger()
def __snake_case ( UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
A = {}
A = os.path.join(UpperCamelCase__ , 'all_results.json' )
if os.path.exists(UpperCamelCase__ ):
with open(UpperCamelCase__ , 'r' ) as f:
A = json.load(UpperCamelCase__ )
else:
raise ValueError(f'can\'t find {path}' )
return results
UpperCamelCase : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCamelCase__ ( UpperCAmelCase_ ):
def __a ( self : Union[str, Any] ):
import xla_spawn
A = self.get_auto_remove_tmp_dir()
A = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(_lowercase , 'argv' , _lowercase ):
A = time()
xla_spawn.main()
A = time()
A = get_results(_lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def __a ( self : List[Any] ):
import xla_spawn
A = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(_lowercase , 'argv' , _lowercase ):
xla_spawn.main()
| 690 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _lowercase : int=2 , _lowercase : Optional[Any]=3 , _lowercase : Any=64 , _lowercase : Tuple=None ):
A = np.random.default_rng(_lowercase )
A = length
A = rng.normal(size=(length,) ).astype(np.floataa )
A = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
return self.length
def __getitem__( self : List[str] , _lowercase : int ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[int] , _lowercase : Any=0 , _lowercase : List[Any]=0 , _lowercase : Optional[int]=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A = True
def __a ( self : Optional[Any] , _lowercase : str=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a[0] + self.b[0]
class lowerCamelCase__ ( torch.nn.Module ):
def __init__( self : Optional[Any] , _lowercase : Any=0 , _lowercase : List[str]=0 , _lowercase : str=False ):
super().__init__()
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = torch.nn.Parameter(torch.tensor(_lowercase ).float() )
A = True
def __a ( self : int , _lowercase : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
A = False
return x * self.a + self.b
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Optional[Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
A = load_dataset('csv' , data_files=UpperCamelCase__ )
A = datasets['train'].unique('label' )
A = {v: i for i, v in enumerate(UpperCamelCase__ )}
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
if "label" in examples:
A = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 )
A = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 )
return train_dataloader, eval_dataloader
| 690 | 1 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = 'ybelkada/fonts'
def __lowerCAmelCase( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'Pix2StructImageProcessor. Please upgrade torch.' )
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ):
"""simple docstring"""
requires_backends(__UpperCAmelCase ,['torch'] )
_check_torch_version()
_lowercase : List[Any] = image_tensor.unsqueeze(0 )
_lowercase : Dict = torch.nn.functional.unfold(__UpperCAmelCase ,(patch_height, patch_width) ,stride=(patch_height, patch_width) )
_lowercase : List[Any] = patches.reshape(image_tensor.size(0 ) ,image_tensor.size(1 ) ,__UpperCAmelCase ,__UpperCAmelCase ,-1 )
_lowercase : List[str] = patches.permute(0 ,4 ,2 ,3 ,1 ).reshape(
image_tensor.size(2 ) // patch_height ,image_tensor.size(3 ) // patch_width ,image_tensor.size(1 ) * patch_height * patch_width ,)
return patches.unsqueeze(0 )
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase = 36 ,__UpperCAmelCase = "black" ,__UpperCAmelCase = "white" ,__UpperCAmelCase = 5 ,__UpperCAmelCase = 5 ,__UpperCAmelCase = 5 ,__UpperCAmelCase = 5 ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,):
"""simple docstring"""
requires_backends(__UpperCAmelCase ,'vision' )
# Add new lines so that each line is no more than 80 characters.
_lowercase : List[Any] = textwrap.TextWrapper(width=80 )
_lowercase : int = wrapper.wrap(text=__UpperCAmelCase )
_lowercase : Optional[int] = '\n'.join(__UpperCAmelCase )
if font_bytes is not None and font_path is None:
_lowercase : List[str] = io.BytesIO(__UpperCAmelCase )
elif font_path is not None:
_lowercase : int = font_path
else:
_lowercase : List[Any] = hf_hub_download(__UpperCAmelCase ,'Arial.TTF' )
_lowercase : Optional[Any] = ImageFont.truetype(__UpperCAmelCase ,encoding='UTF-8' ,size=__UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
_lowercase : Tuple = ImageDraw.Draw(Image.new('RGB' ,(1, 1) ,__UpperCAmelCase ) )
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = temp_draw.textbbox((0, 0) ,__UpperCAmelCase ,__UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
_lowercase : Optional[int] = text_width + left_padding + right_padding
_lowercase : int = text_height + top_padding + bottom_padding
_lowercase : Any = Image.new('RGB' ,(image_width, image_height) ,__UpperCAmelCase )
_lowercase : Optional[Any] = ImageDraw.Draw(__UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) ,text=__UpperCAmelCase ,fill=__UpperCAmelCase ,font=__UpperCAmelCase )
return image
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ):
"""simple docstring"""
requires_backends(__UpperCAmelCase ,'vision' )
# Convert to PIL image if necessary
_lowercase : Optional[int] = to_pil_image(__UpperCAmelCase )
_lowercase : Dict = render_text(__UpperCAmelCase ,**__UpperCAmelCase )
_lowercase : Dict = max(header_image.width ,image.width )
_lowercase : Union[str, Any] = int(image.height * (new_width / image.width) )
_lowercase : Optional[Any] = int(header_image.height * (new_width / header_image.width) )
_lowercase : Union[str, Any] = Image.new('RGB' ,(new_width, new_height + new_header_height) ,'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) ,(0, 0) )
new_image.paste(image.resize((new_width, new_height) ) ,(0, new_header_height) )
# Convert back to the original framework if necessary
_lowercase : Dict = to_numpy_array(__UpperCAmelCase )
if infer_channel_dimension_format(__UpperCAmelCase ) == ChannelDimension.LAST:
_lowercase : int = to_channel_dimension_format(__UpperCAmelCase ,ChannelDimension.LAST )
return new_image
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = ["flattened_patches"]
def __init__( self : Any , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : int = 2_0_4_8 , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
_lowercase : Union[str, Any] = patch_size if patch_size is not None else {'height': 1_6, 'width': 1_6}
_lowercase : Optional[Any] = do_normalize
_lowercase : str = do_convert_rgb
_lowercase : Union[str, Any] = max_patches
_lowercase : int = is_vqa
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : int , lowerCamelCase_ : dict , **lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
_lowercase : str = to_channel_dimension_format(lowerCamelCase_ , ChannelDimension.FIRST )
_lowercase : List[Any] = torch.from_numpy(lowerCamelCase_ )
_lowercase , _lowercase : str = patch_size['height'], patch_size['width']
_lowercase , _lowercase : Optional[Any] = get_image_size(lowerCamelCase_ )
# maximize scale s.t.
_lowercase : str = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
_lowercase : Dict = max(min(math.floor(scale * image_height / patch_height ) , lowerCamelCase_ ) , 1 )
_lowercase : List[str] = max(min(math.floor(scale * image_width / patch_width ) , lowerCamelCase_ ) , 1 )
_lowercase : Any = max(num_feasible_rows * patch_height , 1 )
_lowercase : Optional[int] = max(num_feasible_cols * patch_width , 1 )
_lowercase : Dict = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=lowerCamelCase_ , antialias=lowerCamelCase_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
_lowercase : Any = torch_extract_patches(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = patches.shape
_lowercase : List[Any] = patches_shape[1]
_lowercase : List[str] = patches_shape[2]
_lowercase : int = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
_lowercase : Dict = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
_lowercase : Optional[int] = torch.arange(lowerCamelCase_ ).reshape([rows, 1] ).repeat(1 , lowerCamelCase_ ).reshape([rows * columns, 1] )
_lowercase : List[Any] = torch.arange(lowerCamelCase_ ).reshape([1, columns] ).repeat(lowerCamelCase_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
_lowercase : Any = row_ids.to(torch.floataa )
_lowercase : Optional[Any] = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
_lowercase : Dict = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
_lowercase : Dict = torch.nn.functional.pad(lowerCamelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
_lowercase : Tuple = to_numpy_array(lowerCamelCase_ )
return result
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
if image.dtype == np.uinta:
_lowercase : str = image.astype(np.floataa )
# take mean across the whole `image`
_lowercase : List[Any] = np.mean(lowerCamelCase_ )
_lowercase : Any = np.std(lowerCamelCase_ )
_lowercase : Dict = max(lowerCamelCase_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : ImageInput , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[Dict[str, int]] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase_ : Tuple , ):
"""simple docstring"""
_lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowercase : str = patch_size if patch_size is not None else self.patch_size
_lowercase : Dict = max_patches if max_patches is not None else self.max_patches
_lowercase : List[str] = self.is_vqa
if kwargs.get('data_format' , lowerCamelCase_ ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
_lowercase : Any = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowercase : Tuple = [convert_to_rgb(lowerCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
_lowercase : Any = [to_numpy_array(lowerCamelCase_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
_lowercase : str = kwargs.pop('font_bytes' , lowerCamelCase_ )
_lowercase : str = kwargs.pop('font_path' , lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : Any = [header_text] * len(lowerCamelCase_ )
_lowercase : Dict = [
render_header(lowerCamelCase_ , header_text[i] , font_bytes=lowerCamelCase_ , font_path=lowerCamelCase_ )
for i, image in enumerate(lowerCamelCase_ )
]
if do_normalize:
_lowercase : Tuple = [self.normalize(image=lowerCamelCase_ ) for image in images]
# convert to torch tensor and permute
_lowercase : List[Any] = [
self.extract_flattened_patches(image=lowerCamelCase_ , max_patches=lowerCamelCase_ , patch_size=lowerCamelCase_ )
for image in images
]
# create attention mask in numpy
_lowercase : Union[str, Any] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
_lowercase : str = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=lowerCamelCase_ )
return encoded_outputs
| 283 | """simple docstring"""
import math
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
_lowercase : List[Any] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCAmelCase )
if number < 1:
_lowercase : List[Any] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(__UpperCAmelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
_lowercase : str = int(math.log(number // 3 ,2 ) ) + 2
_lowercase : Union[str, Any] = [3, 5]
_lowercase : Optional[int] = 2
_lowercase : List[Any] = 3
for block in range(1 ,__UpperCAmelCase ):
for _ in range(__UpperCAmelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
SCREAMING_SNAKE_CASE = 0
try:
SCREAMING_SNAKE_CASE = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 283 | 1 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _a () -> Any:
"""simple docstring"""
__snake_case = HfArgumentParser(lowercase__ )
__snake_case = parser.parse_args_into_dataclasses()[0]
__snake_case = TensorFlowBenchmark(args=lowercase__ )
try:
__snake_case = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__snake_case = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
__snake_case = ' '.join(str(lowercase__ ).split(' ' )[:-1] )
__snake_case = ''
__snake_case = eval(str(lowercase__ ).split(' ' )[-1] )
__snake_case = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase__ )
if len(lowercase__ ) > 0:
__snake_case = full_error_msg + begin_error_msg + str(lowercase__ )
raise ValueError(lowercase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 56 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |
a =[
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 337 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a ={
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 337 | 1 |
from collections.abc import Sequence
from queue import Queue
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
UpperCamelCase_: Tuple = start
UpperCamelCase_: Tuple = end
UpperCamelCase_: Optional[Any] = val
UpperCamelCase_: List[str] = (start + end) // 2
UpperCamelCase_: List[str] = left
UpperCamelCase_: List[Any] = right
def __repr__( self ):
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: str = collection
UpperCamelCase_: Optional[Any] = function
if self.collection:
UpperCamelCase_: str = self._build_tree(0 , len(_lowerCamelCase ) - 1 )
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
self._update_tree(self.root , _lowerCamelCase , _lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
return self._query_range(self.root , _lowerCamelCase , _lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
if start == end:
return SegmentTreeNode(_lowerCamelCase , _lowerCamelCase , self.collection[start] )
UpperCamelCase_: int = (start + end) // 2
UpperCamelCase_: List[str] = self._build_tree(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Any = self._build_tree(mid + 1 , _lowerCamelCase )
return SegmentTreeNode(_lowerCamelCase , _lowerCamelCase , self.fn(left.val , right.val ) , _lowerCamelCase , _lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if node.start == i and node.end == i:
UpperCamelCase_: List[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , _lowerCamelCase , _lowerCamelCase )
else:
self._update_tree(node.right , _lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Any = self.fn(node.left.val , node.right.val )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , _lowerCamelCase , _lowerCamelCase )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , _lowerCamelCase , node.mid ) , self._query_range(node.right , node.mid + 1 , _lowerCamelCase ) , )
else:
# range in right child tree
return self._query_range(node.right , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
if self.root is not None:
UpperCamelCase_: Union[str, Any] = Queue()
queue.put(self.root )
while not queue.empty():
UpperCamelCase_: List[str] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
A_ : List[str] = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 57 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MobileNetV2FeatureExtractor']
UpperCAmelCase_ = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 603 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
SCREAMING_SNAKE_CASE : int = False
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ):
return 12
@property
def UpperCamelCase ( self ):
return 12
@property
def UpperCamelCase ( self ):
return 32
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase_ )
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :int = 12
lowercase_ :List[Any] = 12
lowercase_ :Dict = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
lowercase_ :int = TransformeraDModel(**UpperCamelCase_ )
return model
def UpperCamelCase ( self ):
lowercase_ :List[str] = '''cpu'''
lowercase_ :int = self.dummy_vqvae
lowercase_ :int = self.dummy_text_encoder
lowercase_ :Any = self.dummy_tokenizer
lowercase_ :Optional[int] = self.dummy_transformer
lowercase_ :List[str] = VQDiffusionScheduler(self.num_embed )
lowercase_ :int = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase_ )
lowercase_ :List[Any] = VQDiffusionPipeline(
vqvae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , transformer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
lowercase_ :Union[str, Any] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Dict = '''teddy bear playing in the pool'''
lowercase_ :Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' )
lowercase_ :Any = output.images
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :str = pipe(
[prompt] , generator=UpperCamelCase_ , output_type='''np''' , return_dict=UpperCamelCase_ , num_inference_steps=2 )[0]
lowercase_ :Optional[Any] = image[0, -3:, -3:, -1]
lowercase_ :Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase_ :str = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :int = '''cpu'''
lowercase_ :Dict = self.dummy_vqvae
lowercase_ :str = self.dummy_text_encoder
lowercase_ :List[Any] = self.dummy_tokenizer
lowercase_ :Any = self.dummy_transformer
lowercase_ :Optional[Any] = VQDiffusionScheduler(self.num_embed )
lowercase_ :List[str] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowercase_ :Optional[int] = VQDiffusionPipeline(
vqvae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , transformer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
lowercase_ :Dict = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :int = '''teddy bear playing in the pool'''
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' )
lowercase_ :Optional[Any] = output.images
lowercase_ :Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Dict = pipe(
[prompt] , generator=UpperCamelCase_ , output_type='''np''' , return_dict=UpperCamelCase_ , num_inference_steps=2 )[0]
lowercase_ :List[str] = image[0, -3:, -3:, -1]
lowercase_ :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase_ :Dict = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
lowercase_ :List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
lowercase_ :int = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
lowercase_ :Tuple = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowercase_ :Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :int = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :List[str] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 441 |
def UpperCamelCase ( _a , _a ) -> int:
'''simple docstring'''
while a != 0:
lowercase_ , lowercase_ :Union[str, Any] = b % a, a
return b
def UpperCamelCase ( _a , _a ) -> int:
'''simple docstring'''
if gcd(_a , _a ) != 1:
lowercase_ :Union[str, Any] = f"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(_a )
lowercase_ , lowercase_ , lowercase_ :Any = 1, 0, a
lowercase_ , lowercase_ , lowercase_ :List[str] = 0, 1, m
while va != 0:
lowercase_ :Tuple = ua // va
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ :Optional[int] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 441 | 1 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE__ : int = 10**9 ) -> int:
'''simple docstring'''
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : int = 2
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
_UpperCAmelCase : Tuple = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"{solution() = }")
| 289 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( _UpperCamelCase ):
def snake_case_ ( self : str ):
_UpperCAmelCase : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , "width_multiplier" ) )
class UpperCAmelCase_ :
def __init__( self : Dict , A : Union[str, Any] , A : Dict=1_3 , A : Union[str, Any]=6_4 , A : int=2 , A : Tuple=3 , A : int="swish" , A : Optional[int]=3 , A : Tuple=3_2 , A : Tuple=0.1 , A : List[Any]=0.02 , A : Dict=True , A : str=True , A : Any=1_0 , A : Optional[int]=None , A : int=0.25 , A : int=0.0 , A : Dict=0.0 , ):
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Optional[Any] = batch_size
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : int = patch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : Optional[int] = make_divisible(5_1_2 * width_multiplier , divisor=8 )
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : List[str] = conv_kernel_size
_UpperCAmelCase : Optional[int] = output_stride
_UpperCAmelCase : Union[str, Any] = classifier_dropout_prob
_UpperCAmelCase : str = use_labels
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Tuple = scope
_UpperCAmelCase : List[Any] = width_multiplier
_UpperCAmelCase : List[str] = ffn_dropout
_UpperCAmelCase : List[Any] = attn_dropout
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : str = None
_UpperCAmelCase : List[Any] = None
if self.use_labels:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self : List[Any] ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def snake_case_ ( self : int , A : Optional[Any] , A : Optional[int] , A : Any , A : int ):
_UpperCAmelCase : List[str] = MobileViTVaModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : str = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self : Any , A : Optional[int] , A : Optional[Any] , A : str , A : List[str] ):
_UpperCAmelCase : List[str] = self.num_labels
_UpperCAmelCase : int = MobileViTVaForImageClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : str = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : Optional[int] , A : Tuple , A : Any , A : Any , A : Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = self.num_labels
_UpperCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(A )
model.to(A )
model.eval()
_UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase : Optional[int] = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self : Any ):
_UpperCAmelCase : Any = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = config_and_inputs
_UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : Dict = MobileViTVaModelTester(self )
_UpperCAmelCase : Optional[Any] = MobileViTVaConfigTester(self , config_class=A , has_text_modality=A )
def snake_case_ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def snake_case_ ( self : Optional[int] ):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def snake_case_ ( self : Optional[Any] ):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def snake_case_ ( self : int ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def snake_case_ ( self : Any ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case_ ( self : str ):
pass
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class(A )
_UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : int = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
def snake_case_ ( self : int ):
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def snake_case_ ( self : Optional[Any] ):
def check_hidden_states_output(A : Union[str, Any] , A : Dict , A : str ):
_UpperCAmelCase : int = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(A , A ) )
_UpperCAmelCase : List[str] = outputs.hidden_states
_UpperCAmelCase : int = 5
self.assertEqual(len(A ) , A )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_UpperCAmelCase : Optional[int] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def snake_case_ ( self : int ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Union[str, Any] = MobileViTVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self : Any ):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self : Dict ):
_UpperCAmelCase : Dict = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
A )
_UpperCAmelCase : Optional[Any] = self.default_image_processor
_UpperCAmelCase : Tuple = prepare_img()
_UpperCAmelCase : Union[str, Any] = image_processor(images=A , return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**A )
# verify the logits
_UpperCAmelCase : Tuple = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
_UpperCAmelCase : Optional[Any] = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
@slow
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : List[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : Any = model.to(A )
_UpperCAmelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : Dict = prepare_img()
_UpperCAmelCase : Union[str, Any] = image_processor(images=A , return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
_UpperCAmelCase : int = model(**A )
_UpperCAmelCase : Any = outputs.logits
# verify the logits
_UpperCAmelCase : Optional[int] = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , A )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1e-4 ) )
@slow
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : Tuple = model.to(A )
_UpperCAmelCase : Dict = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : Optional[int] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(images=A , return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**A )
_UpperCAmelCase : Optional[int] = outputs.logits.detach().cpu()
_UpperCAmelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(5_0, 6_0)] )
_UpperCAmelCase : Dict = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , A )
_UpperCAmelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=A )
_UpperCAmelCase : Union[str, Any] = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , A )
| 289 | 1 |
'''simple docstring'''
def A (__lowerCamelCase :str , __lowerCamelCase :str ):
assert x is not None
assert y is not None
_lowerCAmelCase = len(__lowerCamelCase )
_lowerCAmelCase = len(__lowerCamelCase )
# declaring the array for storing the dp values
_lowerCAmelCase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
_lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
_lowerCAmelCase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_lowerCAmelCase = """"""
_lowerCAmelCase , _lowerCAmelCase = m, n
while i > 0 and j > 0:
_lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_lowerCAmelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_lowercase = """AGGTAB"""
_lowercase = """GXTXAYB"""
_lowercase = 4
_lowercase = """GTAB"""
_lowercase , _lowercase = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 162 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
_lowercase = 300 # TEMPERATURE (unit = K)
def A (__lowerCamelCase :float , __lowerCamelCase :float , __lowerCamelCase :float , ):
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 162 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
UpperCAmelCase__ : Optional[int] = logging.getLogger(__name__)
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_05_22, type=int)
UpperCAmelCase__ : Union[str, Any] = parser.parse_args()
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file, "rb") as fp:
UpperCAmelCase__ : Optional[int] = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
UpperCAmelCase__ : Optional[int] = Counter()
for tk_ids in data:
counter.update(tk_ids)
UpperCAmelCase__ : str = [0] * args.vocab_size
for k, v in counter.items():
UpperCAmelCase__ : Dict = v
logger.info(F"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 48 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
def A ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("Successfully saved processor and model to" , UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
model.push_to_hub(UpperCamelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 48 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _a ( metaclass=lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""speech"""]
def __init__( self : List[str] , *__UpperCamelCase : int , **__UpperCamelCase : Any )->Optional[int]:
requires_backends(self , ['''speech'''] )
class _a ( metaclass=lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""speech"""]
def __init__( self : Optional[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Any )->List[Any]:
requires_backends(self , ['''speech'''] )
| 718 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class _a ( lowerCAmelCase , lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """bit"""
UpperCamelCase__ = ["""preactivation""", """bottleneck"""]
UpperCamelCase__ = ["""SAME""", """VALID"""]
def __init__( self : List[Any] , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Optional[int]=6_4 , __UpperCamelCase : List[Any]=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __UpperCamelCase : Any=[3, 4, 6, 3] , __UpperCamelCase : str="preactivation" , __UpperCamelCase : Union[str, Any]="relu" , __UpperCamelCase : str=None , __UpperCamelCase : Dict=3_2 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any=False , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=1 , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : List[str] , )->List[Any]:
super().__init__(**__UpperCamelCase )
if layer_type not in self.layer_types:
raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_UpperCAmelCase = global_padding.upper()
else:
raise ValueError(F'Padding strategy {global_padding} not supported' )
_UpperCAmelCase = num_channels
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = layer_type
_UpperCAmelCase = hidden_act
_UpperCAmelCase = global_padding
_UpperCAmelCase = num_groups
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = embedding_dynamic_padding
_UpperCAmelCase = output_stride
_UpperCAmelCase = width_factor
_UpperCAmelCase = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(__UpperCamelCase ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
| 95 | 0 |
from math import ceil
def UpperCamelCase_ ( __a = 1_001 ) -> int:
a__ : Optional[Any] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
a__ : List[str] = 2 * i + 1
a__ : Optional[int] = 2 * i
a__ : Dict = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
UpperCamelCase : List[str] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 37 |
import math
def snake_case__ ( UpperCAmelCase : int ):
assert isinstance(UpperCAmelCase , UpperCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
lowerCAmelCase__ :Tuple = range(3 , int(math.sqrt(UpperCAmelCase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def snake_case__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict=1 , **UpperCAmelCase : List[str] ):
lowerCAmelCase__ :Dict = factor * value
lowerCAmelCase__ :Optional[Any] = value
while not is_prime(UpperCAmelCase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **UpperCAmelCase )
return value
| 145 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''blenderbot-small'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , _A : List[str]=5_0265 , _A : Tuple=512 , _A : Optional[Any]=8 , _A : Any=2048 , _A : List[str]=16 , _A : str=8 , _A : Optional[int]=2048 , _A : Tuple=16 , _A : Any=0.0 , _A : Any=0.0 , _A : int=True , _A : Union[str, Any]=True , _A : str="gelu" , _A : Tuple=512 , _A : int=0.1 , _A : Dict=0.0 , _A : List[str]=0.0 , _A : Union[str, Any]=0.02 , _A : List[Any]=1 , _A : Tuple=False , _A : Optional[Any]=0 , _A : Optional[Any]=1 , _A : str=2 , _A : Optional[int]=2 , **_A : Any , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : List[Any] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE : Tuple = encoder_layers
__SCREAMING_SNAKE_CASE : int = encoder_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = decoder_ffn_dim
__SCREAMING_SNAKE_CASE : Tuple = decoder_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = decoder_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = dropout
__SCREAMING_SNAKE_CASE : str = attention_dropout
__SCREAMING_SNAKE_CASE : List[Any] = activation_dropout
__SCREAMING_SNAKE_CASE : Optional[int] = activation_function
__SCREAMING_SNAKE_CASE : Any = init_std
__SCREAMING_SNAKE_CASE : int = encoder_layerdrop
__SCREAMING_SNAKE_CASE : str = decoder_layerdrop
__SCREAMING_SNAKE_CASE : List[Any] = use_cache
__SCREAMING_SNAKE_CASE : List[str] = encoder_layers
__SCREAMING_SNAKE_CASE : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , )
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE : Tuple = {0: '''batch'''}
__SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
__SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_A , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(_A ):
__SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : Any = super().outputs
else:
__SCREAMING_SNAKE_CASE : int = super(_A , self ).outputs
if self.use_past:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(_A ):
__SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def UpperCAmelCase__ ( self : Tuple , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE : List[Any] = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
__SCREAMING_SNAKE_CASE : str = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE : Optional[int] = dict(**_A , **_A )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
__SCREAMING_SNAKE_CASE : List[Any] = common_inputs['''decoder_input_ids'''].shape[1]
__SCREAMING_SNAKE_CASE : str = self.num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : List[str] = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : str = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_A , _A )] , dim=1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
__SCREAMING_SNAKE_CASE : Tuple = min(_A , _A )
__SCREAMING_SNAKE_CASE : int = max(_A , _A ) - min_num_layers
__SCREAMING_SNAKE_CASE : int = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE : Dict = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_A , _A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def UpperCAmelCase__ ( self : Optional[Any] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE : Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE : Optional[int] = seqlen + 2
__SCREAMING_SNAKE_CASE : str = self.num_layers
__SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
__SCREAMING_SNAKE_CASE : int = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : Any = common_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_A , _A , dtype=_A )] , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
def UpperCAmelCase__ ( self : Tuple , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.num_special_tokens_to_add(_A )
__SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE : str = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE : List[str] = dict(tokenizer(_A , return_tensors=_A ) )
return common_inputs
def UpperCAmelCase__ ( self : Optional[int] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
elif self.task == "causal-lm":
__SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_causal_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
return common_inputs
def UpperCAmelCase__ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[int] , _A : Tuple ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : Dict = super()._flatten_past_key_values_(_A , _A , _A , _A )
else:
__SCREAMING_SNAKE_CASE : int = super(_A , self )._flatten_past_key_values_(
_A , _A , _A , _A )
| 700 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""DeiTFeatureExtractor"""]
lowercase_ = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 131 | 0 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase_ ( ):
lowerCamelCase_ = 9
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = kruskal(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(_lowerCamelCase ) == sorted(_lowerCamelCase ) | 142 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase : int = 6_0_0_8_5_1_4_7_5_1_4_3 ):
try:
lowerCamelCase_ = int(_lowerCamelCase )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowerCamelCase_ = 1
lowerCamelCase_ = 2
while i * i <= n:
while n % i == 0:
lowerCamelCase_ = i
n //= i
i += 1
if n > 1:
lowerCamelCase_ = n
return int(_lowerCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''') | 142 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase = logging.get_logger(__name__)
# TODO: upload to AWS
__lowercase = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = '''retribert'''
def __init__( self , __lowerCAmelCase=30522 , __lowerCAmelCase=768 , __lowerCAmelCase=8 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-1_2 , __lowerCAmelCase=True , __lowerCAmelCase=128 , __lowerCAmelCase=0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = share_encoders
lowerCAmelCase = projection_dim
| 605 | '''simple docstring'''
from math import ceil, sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowerCAmelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowerCAmelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 605 | 1 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : Any = logging.get_logger(__name__)
class A ( a ):
__UpperCAmelCase : str = """linear"""
__UpperCAmelCase : List[str] = """cosine"""
__UpperCAmelCase : Optional[int] = """cosine_with_restarts"""
__UpperCAmelCase : str = """polynomial"""
__UpperCAmelCase : Tuple = """constant"""
__UpperCAmelCase : List[Any] = """constant_with_warmup"""
__UpperCAmelCase : Dict = """piecewise_constant"""
def _lowercase ( lowerCamelCase__ : Optimizer, lowerCamelCase__ : int = -1 ):
return LambdaLR(lowerCamelCase__, lambda lowerCamelCase__ : 1, last_epoch=lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optimizer, lowerCamelCase__ : int, lowerCamelCase__ : int = -1 ):
def lr_lambda(lowerCamelCase__ : int ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1.0, lowerCamelCase__ ) )
return 1.0
return LambdaLR(lowerCamelCase__, lowerCamelCase__, last_epoch=lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optimizer, lowerCamelCase__ : str, lowerCamelCase__ : int = -1 ):
_a = {}
_a = step_rules.split("," )
for rule_str in rule_list[:-1]:
_a , _a = rule_str.split(":" )
_a = int(lowerCamelCase__ )
_a = float(lowerCamelCase__ )
_a = value
_a = float(rule_list[-1] )
def create_rules_function(lowerCamelCase__ : str, lowerCamelCase__ : Dict ):
def rule_func(lowerCamelCase__ : int ) -> float:
_a = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCamelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_a = create_rules_function(lowerCamelCase__, lowerCamelCase__ )
return LambdaLR(lowerCamelCase__, lowerCamelCase__, last_epoch=lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str]=-1 ):
def lr_lambda(lowerCamelCase__ : int ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1, lowerCamelCase__ ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optimizer, lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : float = 0.5, lowerCamelCase__ : int = -1 ):
def lr_lambda(lowerCamelCase__ : int ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1, lowerCamelCase__ ) )
_a = float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optimizer, lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : int = 1, lowerCamelCase__ : int = -1 ):
def lr_lambda(lowerCamelCase__ : Optional[Any] ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1, lowerCamelCase__ ) )
_a = float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict, lowerCamelCase__ : Any=1e-7, lowerCamelCase__ : str=1.0, lowerCamelCase__ : Optional[int]=-1 ):
_a = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(lowerCamelCase__ : int ):
if current_step < num_warmup_steps:
return float(lowerCamelCase__ ) / float(max(1, lowerCamelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_a = lr_init - lr_end
_a = num_training_steps - num_warmup_steps
_a = 1 - (current_step - num_warmup_steps) / decay_steps
_a = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
__snake_case : Union[str, Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def _lowercase ( lowerCamelCase__ : Union[str, SchedulerType], lowerCamelCase__ : Optimizer, lowerCamelCase__ : Optional[str] = None, lowerCamelCase__ : Optional[int] = None, lowerCamelCase__ : Optional[int] = None, lowerCamelCase__ : int = 1, lowerCamelCase__ : float = 1.0, lowerCamelCase__ : int = -1, ):
_a = SchedulerType(lowerCamelCase__ )
_a = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase__, last_epoch=lowerCamelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase__, step_rules=lowerCamelCase__, last_epoch=lowerCamelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase__, num_warmup_steps=lowerCamelCase__, last_epoch=lowerCamelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase__, num_warmup_steps=lowerCamelCase__, num_training_steps=lowerCamelCase__, num_cycles=lowerCamelCase__, last_epoch=lowerCamelCase__, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase__, num_warmup_steps=lowerCamelCase__, num_training_steps=lowerCamelCase__, power=lowerCamelCase__, last_epoch=lowerCamelCase__, )
return schedule_func(
lowerCamelCase__, num_warmup_steps=lowerCamelCase__, num_training_steps=lowerCamelCase__, last_epoch=lowerCamelCase__ )
| 131 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class A ( a , a , a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = StableDiffusionControlNetImgaImgPipeline
__UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
__UpperCAmelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a = CLIPTextModel(snake_case_ )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Optional[int]:
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = 2
_a = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , )
_a = floats_tensor(control_image.shape , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
_a = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> str:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __lowerCAmelCase ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : Dict = StableDiffusionControlNetImgaImgPipeline
__UpperCAmelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__UpperCAmelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : List[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> str:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
def init_weights(snake_case_ ):
if isinstance(snake_case_ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(snake_case_ )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(snake_case_ )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a = CLIPTextModel(snake_case_ )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = MultiControlNetModel([controlneta, controlneta] )
_a = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Optional[int]:
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = 2
_a = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , ),
]
_a = floats_tensor(control_image[0].shape , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
_a = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.get_dummy_components()
_a = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
_a = 10.0
_a = 4
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_dummy_components()
_a = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(snake_case_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
_a = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=snake_case_ , controlnet=snake_case_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case_ )
_a = torch.Generator(device="cpu" ).manual_seed(0 )
_a = "evil space-punk bird"
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((5_1_2, 5_1_2) )
_a = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((5_1_2, 5_1_2) )
_a = pipe(
snake_case_ , snake_case_ , control_image=snake_case_ , generator=snake_case_ , output_type="np" , num_inference_steps=5_0 , strength=0.6 , )
_a = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
| 131 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a : Optional[int] = logging.getLogger(__name__)
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self , __lowercase=-1 ):
UpperCAmelCase__ = label_idx
def A__ ( self , __lowercase , __lowercase ):
if isinstance(__a , __a ):
UpperCAmelCase__ = mode.value
UpperCAmelCase__ = os.path.join(__a , F'''{mode}.txt''' )
UpperCAmelCase__ = 1
UpperCAmelCase__ = []
with open(__a , encoding="""utf-8""" ) as f:
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__a , labels=__a ) )
guid_index += 1
UpperCAmelCase__ = []
UpperCAmelCase__ = []
else:
UpperCAmelCase__ = line.split(""" """ )
words.append(splits[0] )
if len(__a ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__a , labels=__a ) )
return examples
def A__ ( self , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(__a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
UpperCAmelCase__ = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(__a )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for \'%s\'.""" , line.split()[0] )
def A__ ( self , __lowercase ):
if path:
with open(__a , """r""" ) as f:
UpperCAmelCase__ = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase__ = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self ):
super().__init__(label_idx=-2 )
def A__ ( self , __lowercase ):
if path:
with open(__a , """r""" ) as f:
UpperCAmelCase__ = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase__ = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
def A__ ( self , __lowercase , __lowercase ):
if isinstance(__a , __a ):
UpperCAmelCase__ = mode.value
UpperCAmelCase__ = os.path.join(__a , F'''{mode}.txt''' )
UpperCAmelCase__ = 1
UpperCAmelCase__ = []
with open(__a , encoding="""utf-8""" ) as f:
for sentence in parse_incr(__a ):
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(__a ) == len(__a )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__a , labels=__a ) )
guid_index += 1
return examples
def A__ ( self , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = 0
for sentence in parse_incr(__a ):
UpperCAmelCase__ = preds_list[example_id]
UpperCAmelCase__ = """"""
for token in sentence:
out += F'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(__a )
example_id += 1
def A__ ( self , __lowercase ):
if path:
with open(__a , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 712 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
UpperCAmelCase__ = old_name
if "patch_embed" in old_name:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = old_name.split(""".""" )
if layer == "0":
UpperCAmelCase__ = old_name.replace("""0""" , """convolution1""" )
elif layer == "1":
UpperCAmelCase__ = old_name.replace("""1""" , """batchnorm_before""" )
elif layer == "3":
UpperCAmelCase__ = old_name.replace("""3""" , """convolution2""" )
else:
UpperCAmelCase__ = old_name.replace("""4""" , """batchnorm_after""" )
if "network" in old_name and re.search(r"""\d\.\d""" , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = r"""\b\d{2}\b"""
if bool(re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase__ = re.search(r"""\d\.\d\d.""" , _SCREAMING_SNAKE_CASE ).group()
else:
UpperCAmelCase__ = re.search(r"""\d\.\d.""" , _SCREAMING_SNAKE_CASE ).group()
if int(match[0] ) < 6:
UpperCAmelCase__ = old_name.replace(_SCREAMING_SNAKE_CASE , """""" )
UpperCAmelCase__ = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
UpperCAmelCase__ = """intermediate_stages.""" + trimmed_name
else:
UpperCAmelCase__ = old_name.replace(_SCREAMING_SNAKE_CASE , """""" )
if int(match[2] ) < num_meta4D_last_stage:
UpperCAmelCase__ = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] )
else:
UpperCAmelCase__ = str(int(match[2] ) - num_meta4D_last_stage )
UpperCAmelCase__ = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index )
if "norm1" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""norm1""" , """layernorm1""" )
elif "norm2" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""norm2""" , """layernorm2""" )
elif "fc1" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""fc1""" , """linear_in""" )
elif "fc2" in old_name:
UpperCAmelCase__ = trimmed_name.replace("""fc2""" , """linear_out""" )
UpperCAmelCase__ = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(r""".\d.""" , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = old_name.replace("""network""" , """intermediate_stages""" )
if "fc" in new_name:
UpperCAmelCase__ = new_name.replace("""fc""" , """convolution""" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
UpperCAmelCase__ = new_name.replace("""norm1""" , """batchnorm_before""" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
UpperCAmelCase__ = new_name.replace("""norm2""" , """batchnorm_after""" )
if "proj" in new_name:
UpperCAmelCase__ = new_name.replace("""proj""" , """projection""" )
if "dist_head" in new_name:
UpperCAmelCase__ = new_name.replace("""dist_head""" , """distillation_classifier""" )
elif "head" in new_name:
UpperCAmelCase__ = new_name.replace("""head""" , """classifier""" )
elif "patch_embed" in new_name:
UpperCAmelCase__ = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
UpperCAmelCase__ = new_name.replace("""norm""" , """layernorm""" )
UpperCAmelCase__ = """efficientformer.""" + new_name
else:
UpperCAmelCase__ = """efficientformer.encoder.""" + new_name
return new_name
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
for key in checkpoint.copy().keys():
UpperCAmelCase__ = checkpoint.pop(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = val
return checkpoint
def snake_case__ ( ) ->Optional[Any]:
UpperCAmelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
UpperCAmelCase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""]
UpperCAmelCase__ = EfficientFormerConfig.from_json_file(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = EfficientFormerForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] )
UpperCAmelCase__ = config.depths[-1] - config.num_metaad_blocks + 1
UpperCAmelCase__ = convert_torch_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase__ = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = 2_5_6
UpperCAmelCase__ = 2_2_4
UpperCAmelCase__ = EfficientFormerImageProcessor(
size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , )
UpperCAmelCase__ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
# original processing pipeline
UpperCAmelCase__ = Compose(
[
Resize(_SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings["""bicubic"""] ),
CenterCrop(_SCREAMING_SNAKE_CASE ),
ToTensor(),
Normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
] )
UpperCAmelCase__ = image_transforms(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = outputs.logits
UpperCAmelCase__ = (1, 1_0_0_0)
if "l1" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :1_0] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :1_0] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
UpperCAmelCase__ = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print("""Pushing model to the hub...""" )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add model""" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add image processor""" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
a : Optional[int] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 422 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 210 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
UpperCAmelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
f"""{test_file} instead.""" )
UpperCAmelCase = components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
UpperCAmelCase = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
UpperCAmelCase = '''.'''.join(A )
return test_module_path
def lowerCamelCase__ ( A : Any ):
'''simple docstring'''
UpperCAmelCase = get_module_path(A )
UpperCAmelCase = importlib.import_module(A )
return test_module
def lowerCamelCase__ ( A : Tuple ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = get_test_module(A )
for attr in dir(A ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(A , A ) )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : Any ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = get_test_module(A )
for attr in dir(A ):
UpperCAmelCase = getattr(A , A )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCAmelCase = getattr(A , '''all_model_classes''' , [] )
if len(A ) > 0:
test_classes.append(A )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A )
UpperCAmelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = test_class()
if hasattr(A , '''setUp''' ):
test.setUp()
UpperCAmelCase = None
if hasattr(A , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCAmelCase = test.model_tester.__class__
return model_tester
def lowerCamelCase__ ( A : Tuple , A : int ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A )
UpperCAmelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(A )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : Any , A : Tuple ):
'''simple docstring'''
UpperCAmelCase = get_test_classes_for_model(A , A )
UpperCAmelCase = []
for test_class in test_classes:
UpperCAmelCase = get_model_tester_from_test_class(A )
if tester_class is not None:
tester_classes.append(A )
# sort with class names
return sorted(A , key=lambda A : x.__name__ )
def lowerCamelCase__ ( A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A )
UpperCAmelCase = {test_class: get_model_tester_from_test_class(A ) for test_class in test_classes}
return test_tester_mapping
def lowerCamelCase__ ( A : Any ):
'''simple docstring'''
UpperCAmelCase = get_model_classes(A )
UpperCAmelCase = {
model_class: get_test_classes_for_model(A , A ) for model_class in model_classes
}
return model_test_mapping
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = get_model_classes(A )
UpperCAmelCase = {
model_class: get_tester_classes_for_model(A , A ) for model_class in model_classes
}
return model_to_tester_mapping
def lowerCamelCase__ ( A : Dict ):
'''simple docstring'''
if isinstance(A , A ):
return o
elif isinstance(A , A ):
return o.__name__
elif isinstance(A , (list, tuple) ):
return [to_json(A ) for x in o]
elif isinstance(A , A ):
return {to_json(A ): to_json(A ) for k, v in o.items()}
else:
return o
| 210 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowercase ( _A ):
_a : Tuple = 'audio-spectrogram-transformer'
def __init__( self , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.0 , a=0.0 , a=0.02 , a=1e-12 , a=1_6 , a=True , a=1_0 , a=1_0 , a=1_0_2_4 , a=1_2_8 , **a , ):
super().__init__(**a )
snake_case__ : List[str] =hidden_size
snake_case__ : Union[str, Any] =num_hidden_layers
snake_case__ : Tuple =num_attention_heads
snake_case__ : Optional[int] =intermediate_size
snake_case__ : Any =hidden_act
snake_case__ : Tuple =hidden_dropout_prob
snake_case__ : str =attention_probs_dropout_prob
snake_case__ : Dict =initializer_range
snake_case__ : str =layer_norm_eps
snake_case__ : str =patch_size
snake_case__ : str =qkv_bias
snake_case__ : str =frequency_stride
snake_case__ : List[Any] =time_stride
snake_case__ : str =max_length
snake_case__ : Optional[Any] =num_mel_bins
| 448 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
__lowerCamelCase : Optional[Any] = parser.parse_args()
__lowerCamelCase : Optional[int] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 448 | 1 |
import socket
def __lowerCAmelCase ( ):
"""simple docstring"""
_lowercase = socket.socket(socket.AF_INET ,socket.SOCK_STREAM )
_lowercase = socket.gethostname()
_lowercase = 12_312
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" ,"""wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
_lowercase = sock.recv(1_024 )
if not data:
break
out_file.write(_A )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
| 398 | def __lowerCAmelCase ( _A ):
"""simple docstring"""
if not isinstance(_A ,_A ):
_lowercase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_A )
if number < 0:
return False
_lowercase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 398 | 1 |
'''simple docstring'''
import os
import sys
import unittest
__snake_case : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case : Optional[int] = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
__snake_case : List[str] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
A__ : Optional[Any] =get_test_to_tester_mapping(lowerCAmelCase_ )
A__ : int =get_test_to_tester_mapping(lowerCAmelCase_ )
A__ : List[Any] ={"""BertModelTest""": """BertModelTester"""}
A__ : Any ={
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
A__ : Tuple =get_model_to_test_mapping(lowerCAmelCase_ )
A__ : List[str] =get_model_to_test_mapping(lowerCAmelCase_ )
A__ : Optional[Any] ={
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A__ : int ={
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : int =get_model_to_tester_mapping(lowerCAmelCase_ )
A__ : List[str] =get_model_to_tester_mapping(lowerCAmelCase_ )
A__ : List[str] ={
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A__ : Optional[Any] ={
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(get_test_info.to_json(lowerCAmelCase_ ) , lowerCAmelCase_ )
| 687 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
| 687 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__snake_case : List[Any] = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Optional[int] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : int = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Dict = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : Tuple = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : str = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def lowerCamelCase__ ( A_ ):
if isinstance(A_ , A_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=False ):
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=None ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = torch.load(A_ , map_location="cpu" )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint["label_emb.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = unet_config["down_block_types"]
UpperCAmelCase_ = unet_config["layers_per_block"]
UpperCAmelCase_ = unet_config["attention_head_dim"]
UpperCAmelCase_ = unet_config["block_out_channels"]
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(A_ ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = "mid_block.resnets.0"
UpperCAmelCase_ = "middle_block.0"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.attentions.0"
UpperCAmelCase_ = "middle_block.1"
UpperCAmelCase_ = convert_attention(A_ , A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.resnets.1"
UpperCAmelCase_ = "middle_block.2"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config["up_block_types"]
for i, layer_type in enumerate(A_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 660 | '''simple docstring'''
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(A_ , A_ ) ) )
def lowerCamelCase__ ( A_ ):
if point:
if isinstance(A_ , A_ ):
for item in point:
if not isinstance(A_ , (int, float) ):
UpperCAmelCase_ = (
"Expected a list of numbers as input, found "
F"""{type(A_ ).__name__}"""
)
raise TypeError(A_ )
else:
UpperCAmelCase_ = F"""Expected a list of numbers as input, found {type(A_ ).__name__}"""
raise TypeError(A_ )
else:
raise ValueError("Missing an input" )
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(A_ , A_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCamelCase :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=99 , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=9 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=8 , __UpperCamelCase=0.1 , __UpperCamelCase=0.002 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=0 , __UpperCamelCase=None , __UpperCamelCase=None , ):
A_ = parent
A_ = batch_size
A_ = encoder_seq_length
A_ = decoder_seq_length
# For common tests
A_ = self.decoder_seq_length
A_ = is_training
A_ = use_attention_mask
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = d_ff
A_ = relative_attention_num_buckets
A_ = dropout_rate
A_ = initializer_factor
A_ = eos_token_id
A_ = pad_token_id
A_ = decoder_start_token_id
A_ = None
A_ = decoder_layers
def lowercase_ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ):
if attention_mask is None:
A_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
A_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
A_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
A_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
A_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowercase_ ( self ):
A_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
A_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
A_ = input_ids.clamp(self.pad_token_id + 1 )
A_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
A_ = self.get_config()
A_ = config.num_attention_heads
A_ = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def lowercase_ ( self ):
A_ , A_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase_ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
A_ = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A_ = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
A_ = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
A_ = result.last_hidden_state
A_ = result.past_key_values
A_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
A_ = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
A_ = model(__UpperCamelCase , use_cache=__UpperCamelCase )
A_ = model(__UpperCamelCase )
A_ = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
A_ , A_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
A_ = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ = model(__UpperCamelCase )["last_hidden_state"]
A_ = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["last_hidden_state"]
# select random slice
A_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ = output_from_no_past[:, -1, random_slice_idx].detach()
A_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase , ):
A_ = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
A_ = model(**__UpperCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class lowerCamelCase ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowerCAmelCase_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowerCAmelCase_ = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowerCAmelCase_ = [0.8, 0.9]
def lowercase_ ( self ):
A_ = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def lowercase_ ( self ):
A_ = self.model_tester.prepare_config_and_inputs()
A_ = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=__UpperCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def lowercase_ ( self ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def lowercase_ ( self ):
A_ = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
A_ = self.model_tester.prepare_config_and_inputs()
A_ = config_and_inputs[0]
A_ = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
A_ = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
A_ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
A_ = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
A_ = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
A_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def lowercase_ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def lowercase_ ( self ):
A_ = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
A_ = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
A_ = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
A_ = tokenizer(__UpperCamelCase , return_tensors="pt" , padding=__UpperCamelCase ).input_ids
# fmt: off
A_ = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
A_ = model.generate(input_ids.to(__UpperCamelCase ) )
A_ = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
A_ = tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 608 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCAmelCase ( snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Any=1e-12 )-> List[str]:
A_ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(snake_case__ , axis=1 ) , a_min=snake_case__ ) ).T
A_ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(snake_case__ , axis=1 ) , a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__ , norm_emb_a.T )
class lowerCamelCase ( nn.Module ):
"""simple docstring"""
lowerCAmelCase_ = 42
lowerCAmelCase_ = jnp.floataa
def lowercase_ ( self ):
A_ = FlaxCLIPVisionModule(self.config.vision_config )
A_ = nn.Dense(self.config.projection_dim , use_bias=__UpperCamelCase , dtype=self.dtype )
A_ = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
A_ = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
A_ = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
A_ = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self , __UpperCamelCase ):
A_ = self.vision_model(__UpperCamelCase )[1]
A_ = self.visual_projection(__UpperCamelCase )
A_ = jax_cosine_distance(__UpperCamelCase , self.special_care_embeds )
A_ = jax_cosine_distance(__UpperCamelCase , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
A_ = 0.0
A_ = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
A_ = jnp.round(__UpperCamelCase , 3 )
A_ = jnp.any(special_scores > 0 , axis=1 , keepdims=__UpperCamelCase )
# Use a lower threshold if an image has any special care concept
A_ = is_special_care * 0.01
A_ = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
A_ = jnp.round(__UpperCamelCase , 3 )
A_ = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase_ = CLIPConfig
lowerCAmelCase_ = """clip_input"""
lowerCAmelCase_ = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = jnp.floataa , __UpperCamelCase = True , **__UpperCamelCase , ):
if input_shape is None:
A_ = (1, 224, 224, 3)
A_ = self.module_class(config=__UpperCamelCase , dtype=__UpperCamelCase , **__UpperCamelCase )
super().__init__(__UpperCamelCase , __UpperCamelCase , input_shape=__UpperCamelCase , seed=__UpperCamelCase , dtype=__UpperCamelCase , _do_init=_do_init )
def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None ):
# init input tensor
A_ = jax.random.normal(__UpperCamelCase , __UpperCamelCase )
A_ , A_ = jax.random.split(__UpperCamelCase )
A_ = {"params": params_rng, "dropout": dropout_rng}
A_ = self.module.init(__UpperCamelCase , __UpperCamelCase )["params"]
return random_params
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , ):
A_ = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__UpperCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 608 | 1 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __snake_case ( UpperCamelCase__ ) -> str:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
A = precision
A = ceil(precision / 14 )
A = 426880 * Decimal(10005 ).sqrt()
A = 1
A = 13591409
A = Decimal(__snake_case )
for k in range(1 , __snake_case ):
A = factorial(6 * k ) // (factorial(3 * k ) * factorial(__snake_case ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
UpperCamelCase : Tuple = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 690 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( a_, a_, unittest.TestCase ):
__lowerCAmelCase = IFPipeline
__lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
__lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __magic_name__ ( self ):
return self._get_dummy_components()
def __magic_name__ ( self , _a , _a=0 ):
if str(_a ).startswith("mps" ):
lowercase : List[str] = torch.manual_seed(_a )
else:
lowercase : Dict = torch.Generator(device=_a ).manual_seed(_a )
lowercase : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __magic_name__ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __magic_name__ ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ ( self ):
self._test_save_load_local()
def __magic_name__ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __magic_name__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ):
# if
lowercase : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowercase : List[str] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=_a , tokenizer=_a )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
lowercase , lowercase : int = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase : List[str] = None
lowercase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
lowercase : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase : List[str] = IFInpaintingPipeline(**pipe_a.components )
lowercase : Optional[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_a , _a , _a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Optional[Any] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Optional[int] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Union[str, Any] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Optional[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
lowercase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : int = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( self , _a , _a , _a , _a ):
# pipeline 1
_start_torch_memory_measurement()
lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_a )
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , num_inference_steps=2 , generator=_a , output_type="np" , )
lowercase : Dict = output.images[0]
assert image.shape == (64, 64, 3)
lowercase : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
lowercase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
lowercase : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
lowercase : str = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_a )
lowercase : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="np" , )
lowercase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_a , _a )
def __magic_name__ ( ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 361 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase (_snake_case ) -> list[int]:
'''simple docstring'''
return [ord(_snake_case ) - 96 for elem in plain]
def lowercase (_snake_case ) -> str:
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def lowercase () -> None:
'''simple docstring'''
__UpperCamelCase = encode(input("-> " ).strip().lower() )
print("Encoded: " ,_snake_case )
print("Decoded:" ,decode(_snake_case ) )
if __name__ == "__main__":
main() | 705 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] )-> Tuple:
__UpperCamelCase = inspect.getfile(accelerate.test_utils )
__UpperCamelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
__UpperCamelCase = test_metrics
@require_cpu
def A ( self : List[str] )-> Tuple:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def A ( self : Optional[Any] )-> Union[str, Any]:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def A ( self : Tuple )-> Optional[int]:
self.test_metrics.main()
@require_multi_gpu
def A ( self : str )-> List[Any]:
print(f"""Found {torch.cuda.device_count()} devices.""" )
__UpperCamelCase = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ , env=os.environ.copy() ) | 228 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
lowercase_ = logging.get_logger(__name__)
lowercase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
'''simple docstring'''
__lowerCamelCase : str = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
__lowerCamelCase : str = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
__lowerCamelCase : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase : int = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
__lowerCamelCase : int = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
__lowerCamelCase : int = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
__lowerCamelCase : bool = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase : bool = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
__lowerCamelCase : float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__lowerCamelCase : int = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__lowerCamelCase : int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
__lowerCamelCase : int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : int = 'train'
__lowerCamelCase : Union[str, Any] = 'dev'
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : SquadDataTrainingArguments
__lowerCamelCase : List[SquadFeatures]
__lowerCamelCase : Split
__lowerCamelCase : bool
def __init__(self , A , A , A = None , A = Split.train , A = False , A = None , A = "pt" , ) -> int:
"""simple docstring"""
_a = args
_a = is_language_sensitive
_a = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(A , A ):
try:
_a = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
_a = mode
# Load data features from cache or dataset file
_a = '''v2''' if args.version_2_with_negative else '''v1'''
_a = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a = cached_features_file + '''.lock'''
with FileLock(A ):
if os.path.exists(A ) and not args.overwrite_cache:
_a = time.time()
_a = torch.load(A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_a = self.old_features['''features''']
_a = self.old_features.get('''dataset''' , A )
_a = self.old_features.get('''examples''' , A )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
_a = self.processor.get_dev_examples(args.data_dir )
else:
_a = self.processor.get_train_examples(args.data_dir )
_a , _a = squad_convert_examples_to_features(
examples=self.examples , tokenizer=A , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A , )
_a = time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , A , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__(self ) -> List[Any]:
"""simple docstring"""
return len(self.features )
def __getitem__(self , A ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_a = self.features[i]
_a = torch.tensor(feature.input_ids , dtype=torch.long )
_a = torch.tensor(feature.attention_mask , dtype=torch.long )
_a = torch.tensor(feature.token_type_ids , dtype=torch.long )
_a = torch.tensor(feature.cls_index , dtype=torch.long )
_a = torch.tensor(feature.p_mask , dtype=torch.float )
_a = torch.tensor(feature.is_impossible , dtype=torch.float )
_a = {
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_a = torch.tensor(feature.start_position , dtype=torch.long )
_a = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
| 11 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _lowercase ( ):
__A : Dict = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=UpperCamelCase__ )
__A : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=UpperCamelCase__ )
env_command_parser(subparsers=UpperCamelCase__ )
launch_command_parser(subparsers=UpperCamelCase__ )
tpu_command_parser(subparsers=UpperCamelCase__ )
test_command_parser(subparsers=UpperCamelCase__ )
# Let's go
__A : Optional[Any] = parser.parse_args()
if not hasattr(UpperCamelCase__, 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 365 | 0 |
'''simple docstring'''
lowerCAmelCase : Tuple = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 720 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowercase (_A = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def lowercase (_A = "" ):
"""simple docstring"""
if len(_A ) == 0:
return True
_lowerCAmelCase : Union[str, Any] = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_lowerCAmelCase : dict[str, int] = {}
for character in lower_case_input_str:
_lowerCAmelCase : Union[str, Any] = character_freq_dict.get(_A , 0 ) + 1
_lowerCAmelCase : List[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowercase (_A = "" ):
"""simple docstring"""
print('\nFor string = ' , _A , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_A ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_A ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
lowerCAmelCase : Tuple = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
lowerCAmelCase : Optional[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 630 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
lowerCAmelCase__ : Tuple =logging.get_logger(__name__)
def __lowercase ( a__ , a__ , a__ ) -> None:
__SCREAMING_SNAKE_CASE = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a__ ) == len(a__ ), f"""{len(a__ )} != {len(a__ )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
lowerCAmelCase__ : Union[str, Any] ={
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
lowerCAmelCase__ : Optional[Any] ={
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __lowercase ( a__ , a__ ) -> Tuple:
try:
__SCREAMING_SNAKE_CASE = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
f""" {n_student}""" )
return list(range(a__ ) )
def __lowercase ( a__ , a__ ) -> List[int]:
if n_student > n_teacher:
raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(a__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __lowercase ( a__ , a__ = "student" , a__ = None , a__ = None , a__=False , a__=None , a__=None , **a__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__SCREAMING_SNAKE_CASE = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a__ , a__ ):
AutoTokenizer.from_pretrained(a__ ).save_pretrained(a__ ) # purely for convenience
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(a__ ).eval()
else:
assert isinstance(a__ , a__ ), f"""teacher must be a model or string got type {type(a__ )}"""
__SCREAMING_SNAKE_CASE = teacher.config.to_diff_dict()
try:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__SCREAMING_SNAKE_CASE = teacher_e
if d is None:
__SCREAMING_SNAKE_CASE = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__SCREAMING_SNAKE_CASE = teacher_e
if d is None:
__SCREAMING_SNAKE_CASE = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a__ )
# Copy weights
__SCREAMING_SNAKE_CASE = teacher.config_class(**a__ )
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_config(a__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__SCREAMING_SNAKE_CASE = student.load_state_dict(teacher.state_dict() , strict=a__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = list(range(a__ ) ), list(range(a__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(a__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__SCREAMING_SNAKE_CASE = pick_layers_to_copy(a__ , a__ )
if d_layers_to_copy is None:
__SCREAMING_SNAKE_CASE = pick_layers_to_copy(a__ , a__ )
try:
if hasattr(
a__ , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a__ )
copy_layers(teacher.decoder.block , student.decoder.block , a__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__SCREAMING_SNAKE_CASE = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 148 |
# Algorithm for the pigeonhole sorting
def __lowercase ( a__ ) -> Tuple:
__SCREAMING_SNAKE_CASE = min(a__ ) # min() finds the minimum value
__SCREAMING_SNAKE_CASE = max(a__ ) # max() finds the maximum value
__SCREAMING_SNAKE_CASE = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__SCREAMING_SNAKE_CASE = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(a__ , a__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__SCREAMING_SNAKE_CASE = 0
for count in range(a__ ):
while holes[count] > 0:
holes[count] -= 1
__SCREAMING_SNAKE_CASE = count + min_val
i += 1
def __lowercase ( ) -> List[str]:
__SCREAMING_SNAKE_CASE = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(a__ )
print('Sorted order is:' , ' '.join(a__ ) )
if __name__ == "__main__":
main()
| 148 | 1 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : torch.FloatTensor
__lowercase : Optional[torch.FloatTensor] = None
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=0.999 , UpperCamelCase_="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
__SCREAMING_SNAKE_CASE = []
for i in range(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase_ ) / alpha_bar_fn(UpperCamelCase_ ) , UpperCamelCase_ ) )
return torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE_ ( __a , __a ):
"""simple docstring"""
__lowercase : Any = 1
@register_to_config
def __init__( self , lowerCAmelCase__ = 1_0_0_0 , lowerCAmelCase__ = 0.00_01 , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = 0 , lowerCAmelCase__ = "epsilon" , lowerCAmelCase__ = 1.0 , **lowerCAmelCase__ , ):
if kwargs.get("""set_alpha_to_one""" , lowerCAmelCase__) is not None:
__SCREAMING_SNAKE_CASE = (
"""The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
)
deprecate("""set_alpha_to_one""" , """1.0.0""" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = kwargs["""set_alpha_to_one"""]
if trained_betas is not None:
__SCREAMING_SNAKE_CASE = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa)
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCAmelCase__)
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
__SCREAMING_SNAKE_CASE = 1.0 - self.betas
__SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0)
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE = 1.0
# setable values
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = torch.from_numpy(np.arange(0 , lowerCAmelCase__).copy().astype(np.intaa))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
return sample
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
f" maximal {self.config.num_train_timesteps} timesteps.")
__SCREAMING_SNAKE_CASE = num_inference_steps
__SCREAMING_SNAKE_CASE = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE = (np.arange(0 , lowerCAmelCase__) * step_ratio).round().copy().astype(np.intaa)
__SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCAmelCase__).to(lowerCAmelCase__)
self.timesteps += self.config.steps_offset
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = True , ):
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE = model_output
__SCREAMING_SNAKE_CASE = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
""" `v_prediction`""")
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range)
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__)
def __len__( self):
return self.config.num_train_timesteps
| 721 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
for j in range(i + 1 , UpperCamelCase_ ):
if numbers[j] < numbers[i]:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
__magic_name__ = input("Enter numbers separated by a comma:\n").strip()
__magic_name__ = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 248 | 0 |
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
UpperCAmelCase_ =[p / w for p, w in zip(lowercase__ , lowercase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
UpperCAmelCase_ =sorted(lowercase__ )
# declaring useful variables
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =0
UpperCAmelCase_ =0
UpperCAmelCase_ =0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
UpperCAmelCase_ =sorted_profit_by_weight[length - i - 1]
UpperCAmelCase_ =profit_by_weight.index(lowercase__ )
UpperCAmelCase_ =-1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
__lowercase : List[str] =[int(x) for x in input("""Input profits separated by spaces: """).split()]
__lowercase : Union[str, Any] =[int(x) for x in input("""Input weights separated by spaces: """).split()]
__lowercase : Tuple =int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 54 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class _UpperCamelCase ( _A ):
'''simple docstring'''
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCamelCase_: Optional[Any] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
UpperCamelCase_: List[Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
UpperCamelCase_: Dict = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
UpperCamelCase_: int = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(snake_case_ )
BertModel.from_pretrained(snake_case_ )
BertTokenizer.from_pretrained(snake_case_ )
pipeline(task="""fill-mask""" , model=snake_case_ )
# baseline - just load from_pretrained with normal network
UpperCamelCase_: Tuple = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
UpperCamelCase_: Optional[int] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase_: int = """1"""
UpperCamelCase_: Optional[Any] = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCAmelCase__ ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCamelCase_: str = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
UpperCamelCase_: Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
UpperCamelCase_: Optional[int] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
UpperCamelCase_: Optional[int] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(snake_case_ )
BertModel.from_pretrained(snake_case_ )
BertTokenizer.from_pretrained(snake_case_ )
pipeline(task="""fill-mask""" , model=snake_case_ )
# baseline - just load from_pretrained with normal network
UpperCamelCase_: Optional[int] = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
UpperCamelCase_: List[str] = self.get_env()
UpperCamelCase_: List[Any] = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCAmelCase__ ( self : int ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCamelCase_: Dict = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
UpperCamelCase_: Optional[Any] = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
UpperCamelCase_: Dict = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
UpperCamelCase_: Union[str, Any] = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
UpperCamelCase_: Union[str, Any] = self.get_env()
UpperCamelCase_: List[Any] = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
UpperCamelCase_: Optional[int] = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase_: Union[str, Any] = """1"""
UpperCamelCase_: int = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Dict = """
from transformers import pipeline
"""
UpperCamelCase_: Union[str, Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
UpperCamelCase_: Dict = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
UpperCamelCase_: Optional[Any] = self.get_env()
UpperCamelCase_: int = """1"""
UpperCamelCase_: int = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
UpperCamelCase_: Any = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: str = """
from transformers import AutoModel
"""
UpperCamelCase_: Dict = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
UpperCamelCase_: Optional[int] = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
UpperCamelCase_: Optional[Any] = self.get_env()
UpperCamelCase_: int = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase_: Union[str, Any] = """1"""
UpperCamelCase_: List[str] = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 548 | 0 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be positive''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
__a = 1 - (matter_density + radiation_density + dark_energy)
__a = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__a = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__snake_case :int = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = ['''ViTFeatureExtractor''']
__snake_case :Optional[Any] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :str = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = KandinskyVaaPriorPipeline
UpperCamelCase = ['''prompt''']
UpperCamelCase = ['''prompt''', '''negative_prompt''']
UpperCamelCase = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase = False
@property
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return 32
@property
def a__ ( self : str ) -> Any:
"""simple docstring"""
return 32
@property
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def a__ ( self : str ) -> str:
"""simple docstring"""
return self.time_input_dim * 4
@property
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return 100
@property
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a__ ( self : Any ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(A_ )
@property
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
lowerCamelCase_ = PriorTransformer(**A_ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowerCamelCase_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
lowerCamelCase_ = CLIPVisionModelWithProjection(A_ )
return model
@property
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=A_ , do_normalize=A_ , do_resize=A_ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.dummy_prior
lowerCamelCase_ = self.dummy_image_encoder
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = self.dummy_tokenizer
lowerCamelCase_ = self.dummy_image_processor
lowerCamelCase_ = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=A_ , clip_sample_range=10.0 , )
lowerCamelCase_ = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def a__ ( self : Dict , A_ : Optional[int] , A_ : Union[str, Any]=0 ) -> List[Any]:
"""simple docstring"""
if str(A_ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(A_ )
else:
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(A_ )
lowerCamelCase_ = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = 'cpu'
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**A_ )
lowerCamelCase_ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(A_ ) )
lowerCamelCase_ = output.image_embeds
lowerCamelCase_ = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
lowerCamelCase_ = image[0, -10:]
lowerCamelCase_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
lowerCamelCase_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
lowerCamelCase_ = torch_device == 'cpu'
lowerCamelCase_ = True
lowerCamelCase_ = False
self._test_inference_batch_single_identical(
test_max_difference=A_ , relax_max_difference=A_ , test_mean_pixel_difference=A_ , )
@skip_mps
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = torch_device == 'cpu'
lowerCamelCase_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=A_ , test_mean_pixel_difference=A_ , )
| 70 |
import numpy as np
def lowerCAmelCase_ (lowerCAmelCase__: np.ndarray , lowerCAmelCase__: float ):
"""simple docstring"""
return np.where(vector > 0 , lowerCAmelCase__ , (alpha * (np.exp(lowerCAmelCase__ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 556 | 0 |
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def UpperCamelCase ( _lowerCAmelCase : Dict ):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def UpperCamelCase ( ):
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def UpperCamelCase ( ):
__a = """mock-s3-bucket"""
__a = f"""s3://{mock_bucket}"""
__a = extract_path_from_uri(_lowerCAmelCase )
assert dataset_path.startswith("""s3://""" ) is False
__a = """./local/path"""
__a = extract_path_from_uri(_lowerCAmelCase )
assert dataset_path == new_dataset_path
def UpperCamelCase ( _lowerCAmelCase : Tuple ):
__a = is_remote_filesystem(_lowerCAmelCase )
assert is_remote is True
__a = fsspec.filesystem("""file""" )
__a = is_remote_filesystem(_lowerCAmelCase )
assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , _lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int ):
__a = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
__a = input_paths[compression_fs_class.protocol]
if input_path is None:
__a = f"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_lowerCAmelCase )
__a = fsspec.filesystem(compression_fs_class.protocol , fo=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
__a = os.path.basename(_lowerCAmelCase )
__a = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f, open(_lowerCAmelCase , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def UpperCamelCase ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
__a = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
__a = compressed_file_paths[protocol]
__a = """dataset.jsonl"""
__a = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
__a , *__a = fsspec.get_fs_token_paths(_lowerCAmelCase )
assert fs.isfile(_lowerCAmelCase )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def UpperCamelCase ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ):
__a = hf_api.dataset_info(_lowerCAmelCase , token=_lowerCAmelCase )
__a = HfFileSystem(repo_info=_lowerCAmelCase , token=_lowerCAmelCase )
assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"]
assert hffs.isdir("""data""" )
assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" )
with open(_lowerCAmelCase ) as f:
assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read()
def UpperCamelCase ( ):
__a = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_lowerCAmelCase , _lowerCAmelCase , clobber=_lowerCAmelCase )
with pytest.warns(_lowerCAmelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_lowerCAmelCase ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 173 | """simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
__a = (boundary[1] - boundary[0]) / steps
__a = boundary[0]
__a = boundary[1]
__a = make_points(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__a = 0.0
y += (h / 2.0) * f(_lowerCAmelCase )
for i in x_i:
# print(i)
y += h * f(_lowerCAmelCase )
y += (h / 2.0) * f(_lowerCAmelCase )
return y
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int ):
__a = a + h
while x < (b - h):
yield x
__a = x + h
def UpperCamelCase ( _lowerCAmelCase : int ): # enter your function here
__a = (x - 0) * (x - 0)
return y
def UpperCamelCase ( ):
__a = 0.0 # Lower bound of integration
__a = 1.0 # Upper bound of integration
__a = 10.0 # define number of steps or resolution
__a = [a, b] # define boundary of integration
__a = method_a(_lowerCAmelCase , _lowerCAmelCase )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 173 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class snake_case ( snake_case__ ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 675 | from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case = TypeVar("T")
class __A ( Generic[T] ):
'''simple docstring'''
a_ = 42 # Cache store of keys
a_ = 42 # References of the keys in cache
a_ = 10 # Maximum capacity of cache
def __init__( self , _snake_case ):
_lowerCAmelCase : Tuple = deque()
_lowerCAmelCase : List[Any] = set()
if not n:
_lowerCAmelCase : Any = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
_lowerCAmelCase : List[str] = n
def SCREAMING_SNAKE_CASE__ ( self , _snake_case ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_lowerCAmelCase : Optional[int] = self.dq_store.pop()
self.key_reference.remove(_snake_case )
else:
self.dq_store.remove(_snake_case )
self.dq_store.appendleft(_snake_case )
self.key_reference.add(_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
for k in self.dq_store:
print(_snake_case )
def __repr__( self ):
return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 424 | 0 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = '''pytorch_model.bin'''
@dataclasses.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''}, )
@dataclasses.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
__UpperCAmelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__lowerCamelCase, metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__lowerCamelCase, metadata={'''help''': '''The name of the task to train on.'''}, )
__UpperCAmelCase : Optional[List[str]] = dataclasses.field(
default=__lowerCamelCase, metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default='''accuracy''', metadata={'''help''': '''The evaluation metric used for the task.'''} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default='''no''', metadata={
'''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
}, )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=10, metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''}, )
__UpperCAmelCase : Optional[float] = dataclasses.field(
default=0.0, metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
}, )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__lowerCamelCase, metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''}, )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__lowerCamelCase, metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''}, )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__lowerCamelCase, metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''}, )
__UpperCAmelCase : Optional[float] = dataclasses.field(
default=0.0, metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''}, )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=100, metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''}, )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=__lowerCamelCase, metadata={'''help''': '''Random seed for initialization.'''}, )
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : str = datasets.concatenate_datasets([infer_input, infer_output] , axis=1)
if args.do_filter_by_confidence:
lowerCamelCase_ : Optional[Any] = dataset.filter(lambda lowerCAmelCase_: example["probability"] > args.confidence_threshold)
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowerCamelCase_ : Union[str, Any] = int(eval_result * len(lowerCAmelCase_))
print(lowerCAmelCase_)
lowerCamelCase_ : Tuple = dataset.sort("probability" , reverse=lowerCAmelCase_)
lowerCamelCase_ : Dict = dataset.select(range(lowerCAmelCase_))
lowerCamelCase_ : List[Any] = dataset.remove_columns(["label", "probability"])
lowerCamelCase_ : int = dataset.rename_column("prediction" , "label")
lowerCamelCase_ : Dict = dataset.map(lambda lowerCAmelCase_: {"label": idalabel[example["label"]]})
lowerCamelCase_ : List[str] = dataset.shuffle(seed=args.seed)
lowerCamelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , F"""train_pseudo.{args.data_file_extension}""")
if args.data_file_extension == "csv":
dataset.to_csv(lowerCAmelCase_ , index=lowerCAmelCase_)
else:
dataset.to_json(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowerCamelCase_ : Any = STModelArguments(model_name_or_path=lowerCAmelCase_)
lowerCamelCase_ : Tuple = STDataArguments(train_file=lowerCAmelCase_ , infer_file=lowerCAmelCase_)
lowerCamelCase_ : Tuple = STTrainingArguments(output_dir=lowerCAmelCase_)
lowerCamelCase_ : List[str] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowerCAmelCase_).items():
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
for key, value in kwargs.items():
if hasattr(lowerCAmelCase_ , lowerCAmelCase_):
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# Sanity checks
lowerCamelCase_ : Union[str, Any] = {}
lowerCamelCase_ : Union[str, Any] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowerCamelCase_ : Optional[Any] = args.train_file
lowerCamelCase_ : Optional[Any] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowerCamelCase_ : Union[str, Any] = args.eval_file
for key in data_files:
lowerCamelCase_ : int = data_files[key].split(".")[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
lowerCamelCase_ : Optional[int] = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
logger.info("Creating the initial data directory for self-training...")
lowerCamelCase_ : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
lowerCamelCase_ : str = data_dir_format(0)
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_)
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_)
accelerator.wait_for_everyone()
lowerCamelCase_ : int = None
lowerCamelCase_ : str = None
lowerCamelCase_ : str = 0
lowerCamelCase_ : int = False
# Show the progress bar
lowerCamelCase_ : Optional[int] = tqdm(range(args.max_selftrain_iterations) , disable=not accelerator.is_local_main_process)
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations)):
lowerCamelCase_ : Dict = data_dir_format(lowerCAmelCase_)
assert os.path.exists(lowerCAmelCase_)
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowerCamelCase_ : Any = os.path.join(lowerCAmelCase_ , "stage-1")
lowerCamelCase_ : Union[str, Any] = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowerCAmelCase_ , lowerCAmelCase_):
arguments_dict.update({key: value})
lowerCamelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , "best-checkpoint" , lowerCAmelCase_)
if os.path.exists(lowerCAmelCase_):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , lowerCAmelCase_ , lowerCAmelCase_ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , lowerCAmelCase_)
finetune(**lowerCAmelCase_)
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase_)
logger.info("Self-training job completed: iteration: %d, stage: 1." , lowerCAmelCase_)
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowerCamelCase_ : str = os.path.join(lowerCAmelCase_ , "best-checkpoint")
lowerCamelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , "stage-2")
# Update arguments_dict
lowerCamelCase_ : Tuple = model_path
lowerCamelCase_ : Any = data_files["train"]
lowerCamelCase_ : Optional[Any] = current_output_dir
lowerCamelCase_ : List[str] = os.path.join(lowerCAmelCase_ , "best-checkpoint" , lowerCAmelCase_)
if os.path.exists(lowerCAmelCase_):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , lowerCAmelCase_ , lowerCAmelCase_ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , lowerCAmelCase_)
finetune(**lowerCAmelCase_)
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase_)
logger.info("Self-training job completed: iteration: %d, stage: 2." , lowerCAmelCase_)
lowerCamelCase_ : int = iteration
lowerCamelCase_ : Any = data_dir_format(iteration + 1)
lowerCamelCase_ : Any = AutoConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "best-checkpoint"))
lowerCamelCase_ : Tuple = config.idalabel
lowerCamelCase_ : Dict = os.path.join(lowerCAmelCase_ , "eval_results_best-checkpoint.json")
lowerCamelCase_ : int = os.path.join(lowerCAmelCase_ , "test_results_best-checkpoint.json")
assert os.path.exists(lowerCAmelCase_)
with open(lowerCAmelCase_ , "r") as f:
lowerCamelCase_ : List[str] = float(json.load(lowerCAmelCase_)[args.eval_metric])
lowerCamelCase_ : Union[str, Any] = os.path.join(lowerCAmelCase_ , "infer_output_best-checkpoint.csv")
assert os.path.exists(lowerCAmelCase_)
# Loading the dataset from local csv or json files.
lowerCamelCase_ : Tuple = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]})["data"]
lowerCamelCase_ : Any = load_dataset("csv" , data_files={"data": infer_output_file})["data"]
if accelerator.is_main_process:
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_)
shutil.copy(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , F"""eval_results_iter-{iteration}.json"""))
if os.path.exists(lowerCAmelCase_):
shutil.copy(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , F"""test_results_iter-{iteration}.json"""))
create_pseudo_labeled_data(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
accelerator.wait_for_everyone()
lowerCamelCase_ : Dict = os.path.join(lowerCAmelCase_ , F"""train_pseudo.{args.data_file_extension}""")
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowerCamelCase_ : Union[str, Any] = eval_result
if best_iteration is None:
lowerCamelCase_ : Dict = new_iteration
lowerCamelCase_ : Union[str, Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowerCamelCase_ : Optional[int] = new_iteration
lowerCamelCase_ : Dict = new_eval_result
lowerCamelCase_ : Any = 0
else:
if new_eval_result == best_eval_result:
lowerCamelCase_ : Optional[int] = new_iteration
lowerCamelCase_ : int = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowerCamelCase_ : Optional[Any] = True
progress_bar.update(1)
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , lowerCAmelCase_)
logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCAmelCase_)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase_ , F"""eval_results_iter-{iteration}.json""") , os.path.join(lowerCAmelCase_ , "eval_results_best-iteration.json") , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1)
logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCAmelCase_)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase_ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""") , os.path.join(lowerCAmelCase_ , "eval_results_best-iteration.json") , )
| 708 |
__magic_name__ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase_ : List[Any] = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {', '.join(lowerCAmelCase_)}"""
)
raise ValueError(lowerCAmelCase_)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : list , __snake_case : int , __snake_case : int = 0 , __snake_case : int = 0 ) -> int:
__A : List[Any] = right or len(__snake_case ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__snake_case , __snake_case , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
'''simple docstring'''
def lowercase__ ( __lowercase : int | float | str ) -> tuple[int, int]:
"""simple docstring"""
try:
__UpperCamelCase = float(__lowercase )
except ValueError:
raise ValueError('Please enter a valid number' )
__UpperCamelCase = decimal - int(__lowercase )
if fractional_part == 0:
return int(__lowercase ), 1
else:
__UpperCamelCase = len(str(__lowercase ).split('.' )[1] )
__UpperCamelCase = int(decimal * (10**number_of_frac_digits) )
__UpperCamelCase = 10**number_of_frac_digits
__UpperCamelCase , __UpperCamelCase = denominator, numerator
while True:
__UpperCamelCase = dividend % divisor
if remainder == 0:
break
__UpperCamelCase , __UpperCamelCase = divisor, remainder
__UpperCamelCase , __UpperCamelCase = numerator / divisor, denominator / divisor
return int(__lowercase ), int(__lowercase )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 399 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "new-model"
if is_tf_available():
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = NewModelConfig
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = "bert-base-cased"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = "bert-base-cased"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : Optional[Any] ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : Union[str, Any] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : str ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : str ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : int ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : Optional[int] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
@require_tensorflow_probability
def _snake_case ( self : int ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 14410 )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 14410 )
def _snake_case ( self : List[str] ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = copy.deepcopy(model.config )
SCREAMING_SNAKE_CASE = ["FunnelBaseModel"]
SCREAMING_SNAKE_CASE = TFAutoModel.from_config(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[str] ):
try:
AutoConfig.register("new-model" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__lowerCamelCase ):
auto_class.register(__lowerCamelCase , __lowerCamelCase )
auto_class.register(__lowerCamelCase , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
auto_class.register(__lowerCamelCase , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = BertModelTester(self ).get_config()
SCREAMING_SNAKE_CASE = NewModelConfig(**tiny_config.to_dict() )
SCREAMING_SNAKE_CASE = auto_class.from_config(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = auto_class.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _snake_case ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("bert-base" )
def _snake_case ( self : List[Any] ):
with self.assertRaisesRegex(
__lowerCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase , revision="aaaaaa" )
def _snake_case ( self : Dict ):
with self.assertRaisesRegex(
__lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def _snake_case ( self : List[str] ):
with self.assertRaisesRegex(__lowerCamelCase , "Use `from_pt=True` to load this model" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def _snake_case ( self : Union[str, Any] ):
# Make sure we have cached the model.
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 698 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__A : Optional[Any] = datasets.load_iris()
__A : Optional[Any] = np.array(data['data'])
__A : Optional[int] = np.array(data['target'])
__A : Union[str, Any] = data['target_names']
__A , __A , __A , __A : Optional[int] = train_test_split(X, y)
def __a ( A__ : Optional[int] , A__ : Dict ):
return np.linalg.norm(np.array(A__ ) - np.array(A__ ) )
def __a ( A__ : Optional[Any] , A__ : int , A__ : Dict , A__ : Optional[Any] , A__ : Dict=5 ):
SCREAMING_SNAKE_CASE = zip(A__ , A__ )
# List of distances of all points from the point to be classified
SCREAMING_SNAKE_CASE = []
for data_point in data:
SCREAMING_SNAKE_CASE = euclidean_distance(data_point[0] , A__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
SCREAMING_SNAKE_CASE = [i[1] for i in sorted(A__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
SCREAMING_SNAKE_CASE = Counter(A__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 698 | 1 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__A : str = logging.get_logger(__name__)
def A_ ( snake_case_ : List[Any] ,snake_case_ : Optional[int] ):
'''simple docstring'''
try:
with open(snake_case_ ,"""rb""" ) as flax_state_f:
UpperCamelCase : Optional[Any] = from_bytes(snake_case_ ,flax_state_f.read() )
except UnpicklingError as e:
try:
with open(snake_case_ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case_ ,snake_case_ )
def A_ ( snake_case_ : Union[str, Any] ,snake_case_ : Tuple ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
UpperCamelCase : Union[str, Any] = flatten_dict(jax.tree_util.tree_map(lambda snake_case_ : x.dtype == jnp.bfloataa ,snake_case_ ) ).values()
if any(snake_case_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
UpperCamelCase : Optional[Any] = jax.tree_util.tree_map(
lambda snake_case_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,snake_case_ )
UpperCamelCase : int = """"""
UpperCamelCase : List[Any] = flatten_dict(snake_case_ ,sep=""".""" )
UpperCamelCase : Any = pt_model.state_dict()
# keep track of unexpected & missing keys
UpperCamelCase : int = []
UpperCamelCase : List[Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase : Dict = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
UpperCamelCase : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""]
UpperCamelCase : List[str] = jnp.transpose(snake_case_ ,(3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
UpperCamelCase : List[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
UpperCamelCase : Union[str, Any] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
UpperCamelCase : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(snake_case_ ):
UpperCamelCase : List[str] = (
flax_key_tuple_string.replace("""_0""" ,""".0""" )
.replace("""_1""" ,""".1""" )
.replace("""_2""" ,""".2""" )
.replace("""_3""" ,""".3""" )
.replace("""_4""" ,""".4""" )
.replace("""_5""" ,""".5""" )
.replace("""_6""" ,""".6""" )
.replace("""_7""" ,""".7""" )
.replace("""_8""" ,""".8""" )
.replace("""_9""" ,""".9""" )
)
UpperCamelCase : Union[str, Any] = """.""".join(snake_case_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase : int = np.asarray(snake_case_ ) if not isinstance(snake_case_ ,np.ndarray ) else flax_tensor
UpperCamelCase : Any = torch.from_numpy(snake_case_ )
# remove from missing keys
missing_keys.remove(snake_case_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case_ )
pt_model.load_state_dict(snake_case_ )
# re-transform missing_keys to list
UpperCamelCase : Union[str, Any] = list(snake_case_ )
if len(snake_case_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(snake_case_ ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
return pt_model
| 499 |
"""simple docstring"""
__A : Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__A : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__A : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 499 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
__UpperCAmelCase : int
__UpperCAmelCase : int
__UpperCAmelCase : float = 0.0
__UpperCAmelCase : int = 1
__UpperCAmelCase : int = 1
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = False
__UpperCAmelCase : jnp.dtype = jnp.floataa
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : List[str] = []
_a : List[str] = []
for i in range(self.num_layers ):
_a : Tuple = self.in_channels if i == 0 else self.out_channels
_a : List[Any] = FlaxResnetBlockaD(
in_channels=_a ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_a )
_a : List[str] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(_a )
_a : Optional[int] = resnets
_a : Optional[Any] = attentions
if self.add_downsample:
_a : Optional[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : Union[str, Any] ,_a : List[Any] ,_a : Optional[int] ,_a : Any ,_a : List[Any]=True ):
'''simple docstring'''
_a : Any = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
_a : Any = resnet(_a ,_a ,deterministic=_a )
_a : int = attn(_a ,_a ,deterministic=_a )
output_states += (hidden_states,)
if self.add_downsample:
_a : int = self.downsamplers_a(_a )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
__UpperCAmelCase : int
__UpperCAmelCase : int
__UpperCAmelCase : float = 0.0
__UpperCAmelCase : int = 1
__UpperCAmelCase : bool = True
__UpperCAmelCase : jnp.dtype = jnp.floataa
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : str = []
for i in range(self.num_layers ):
_a : Optional[Any] = self.in_channels if i == 0 else self.out_channels
_a : Tuple = FlaxResnetBlockaD(
in_channels=_a ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_a )
_a : List[Any] = resnets
if self.add_downsample:
_a : Optional[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : Dict ,_a : List[str] ,_a : str ,_a : str=True ):
'''simple docstring'''
_a : Any = ()
for resnet in self.resnets:
_a : Optional[int] = resnet(_a ,_a ,deterministic=_a )
output_states += (hidden_states,)
if self.add_downsample:
_a : str = self.downsamplers_a(_a )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
__UpperCAmelCase : int
__UpperCAmelCase : int
__UpperCAmelCase : int
__UpperCAmelCase : float = 0.0
__UpperCAmelCase : int = 1
__UpperCAmelCase : int = 1
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = False
__UpperCAmelCase : jnp.dtype = jnp.floataa
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = []
_a : Optional[int] = []
for i in range(self.num_layers ):
_a : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_a : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
_a : List[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_a )
_a : str = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(_a )
_a : Optional[Any] = resnets
_a : Optional[int] = attentions
if self.add_upsample:
_a : Dict = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : List[Any] ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Any ,_a : List[Any]=True ):
'''simple docstring'''
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
_a : str = res_hidden_states_tuple[-1]
_a : Dict = res_hidden_states_tuple[:-1]
_a : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
_a : Union[str, Any] = resnet(_a ,_a ,deterministic=_a )
_a : Union[str, Any] = attn(_a ,_a ,deterministic=_a )
if self.add_upsample:
_a : Tuple = self.upsamplers_a(_a )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
__UpperCAmelCase : int
__UpperCAmelCase : int
__UpperCAmelCase : int
__UpperCAmelCase : float = 0.0
__UpperCAmelCase : int = 1
__UpperCAmelCase : bool = True
__UpperCAmelCase : jnp.dtype = jnp.floataa
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = []
for i in range(self.num_layers ):
_a : Union[str, Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_a : List[str] = self.prev_output_channel if i == 0 else self.out_channels
_a : Dict = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_a )
_a : Tuple = resnets
if self.add_upsample:
_a : Dict = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : List[str] ,_a : List[str] ,_a : Optional[int] ,_a : Tuple ,_a : Any=True ):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
_a : Any = res_hidden_states_tuple[-1]
_a : str = res_hidden_states_tuple[:-1]
_a : Any = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
_a : Dict = resnet(_a ,_a ,deterministic=_a )
if self.add_upsample:
_a : Union[str, Any] = self.upsamplers_a(_a )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
__UpperCAmelCase : int
__UpperCAmelCase : float = 0.0
__UpperCAmelCase : int = 1
__UpperCAmelCase : int = 1
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = False
__UpperCAmelCase : jnp.dtype = jnp.floataa
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Dict = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
_a : Dict = []
for _ in range(self.num_layers ):
_a : Optional[int] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(_a )
_a : Dict = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_a )
_a : List[str] = resnets
_a : List[Any] = attentions
def __call__( self : int ,_a : int ,_a : int ,_a : Optional[int] ,_a : List[Any]=True ):
'''simple docstring'''
_a : Optional[int] = self.resnets[0](_a ,_a )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
_a : Union[str, Any] = attn(_a ,_a ,deterministic=_a )
_a : Optional[int] = resnet(_a ,_a ,deterministic=_a )
return hidden_states
| 702 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Any ,*_a : Optional[Any] ,**_a : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 319 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def A_ ( lowercase ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = int(number**0.5 )
return number == sq * sq
def A_ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ : Dict = x_den * y_den * z_den
UpperCAmelCase_ : Union[str, Any] = gcd(lowercase , lowercase )
top //= hcf
bottom //= hcf
return top, bottom
def A_ ( lowercase = 35 ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = set()
UpperCAmelCase_ : List[Any] = 42
UpperCAmelCase_ : Dict = Fraction(0 )
UpperCAmelCase_ : Optional[int] = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ : Any = x_num * y_den + x_den * y_num
UpperCAmelCase_ : Optional[Any] = x_den * y_den
UpperCAmelCase_ : Union[str, Any] = gcd(lowercase , lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ : Dict = add_three(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
unique_s.add(lowercase )
# n=2
UpperCAmelCase_ : Optional[int] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ : Optional[int] = x_den * x_den * y_den * y_den
if is_sq(lowercase ) and is_sq(lowercase ):
UpperCAmelCase_ : int = int(sqrt(lowercase ) )
UpperCAmelCase_ : Optional[Any] = int(sqrt(lowercase ) )
UpperCAmelCase_ : int = gcd(lowercase , lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ : Dict = add_three(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
unique_s.add(lowercase )
# n=-1
UpperCAmelCase_ : Any = x_num * y_num
UpperCAmelCase_ : List[Any] = x_den * y_num + x_num * y_den
UpperCAmelCase_ : List[Any] = gcd(lowercase , lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ : Optional[int] = add_three(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
unique_s.add(lowercase )
# n=2
UpperCAmelCase_ : Optional[Any] = x_num * x_num * y_num * y_num
UpperCAmelCase_ : Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowercase ) and is_sq(lowercase ):
UpperCAmelCase_ : str = int(sqrt(lowercase ) )
UpperCAmelCase_ : Optional[int] = int(sqrt(lowercase ) )
UpperCAmelCase_ : Any = gcd(lowercase , lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ : Optional[Any] = add_three(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
unique_s.add(lowercase )
for num, den in unique_s:
total += Fraction(lowercase , lowercase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 470 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [True] * 1_0_0_0_0_0_1
_snake_case = 2
while i * i <= 1_0_0_0_0_0_0:
if seive[i]:
for j in range(i * i, 1_0_0_0_0_0_1, i):
_snake_case = False
i += 1
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
return seive[n]
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
return any(digit in '02468' for digit in str(SCREAMING_SNAKE_CASE ) )
def __snake_case ( SCREAMING_SNAKE_CASE: int = 100_0000 ):
"""simple docstring"""
_lowerCAmelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(SCREAMING_SNAKE_CASE ) and not contains_an_even_digit(SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = str(SCREAMING_SNAKE_CASE )
_lowerCAmelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(SCREAMING_SNAKE_CASE ) )]
if all(is_prime(SCREAMING_SNAKE_CASE ) for i in list_nums ):
result.append(SCREAMING_SNAKE_CASE )
return result
def __snake_case ( ):
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
| 580 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ ):
A : Any = params
A : Tuple = np.array(_snake_case )
A : int = np.array([len(_snake_case ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self, lowerCamelCase__ ):
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
def _lowerCAmelCase ( self ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _lowerCAmelCase ( self ):
A : int = self.params.max_model_input_size
A : Any = self.lengths > max_len
logger.info(f'''Splitting {sum(_snake_case )} too long sequences.''' )
def divide_chunks(lowerCamelCase__, lowerCamelCase__ ):
return [l[i : i + n] for i in range(0, len(_snake_case ), _snake_case )]
A : Dict = []
A : Dict = []
if self.params.mlm:
A , A : Optional[Any] = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
A , A : Any = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids, self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
A : List[str] = []
for sub_s in divide_chunks(seq_, max_len - 2 ):
if sub_s[0] != cls_id:
A : Union[str, Any] = np.insert(_snake_case, 0, _snake_case )
if sub_s[-1] != sep_id:
A : int = np.insert(_snake_case, len(_snake_case ), _snake_case )
assert len(_snake_case ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_snake_case )
new_tok_ids.extend(_snake_case )
new_lengths.extend([len(_snake_case ) for l in sub_seqs] )
A : List[Any] = np.array(_snake_case )
A : Optional[Any] = np.array(_snake_case )
def _lowerCAmelCase ( self ):
A : Any = len(self )
A : List[str] = self.lengths > 11
A : List[Any] = self.token_ids[indices]
A : Any = self.lengths[indices]
A : List[str] = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _lowerCAmelCase ( self ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
A : Any = self.params.special_tok_ids["""unk_token"""]
A : int = len(self )
A : Optional[Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
A : Optional[Any] = (unk_occs / self.lengths) < 0.5
A : List[Any] = self.token_ids[indices]
A : Tuple = self.lengths[indices]
A : int = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _lowerCAmelCase ( self ):
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : str = [t[0] for t in batch]
A : Dict = [t[1] for t in batch]
assert len(_snake_case ) == len(_snake_case )
# Max for paddings
A : Union[str, Any] = max(_snake_case )
# Pad token ids
if self.params.mlm:
A : Optional[Any] = self.params.special_tok_ids["""pad_token"""]
else:
A : int = self.params.special_tok_ids["""unk_token"""]
A : Union[str, Any] = [list(t.astype(_snake_case ) ) + [pad_idx] * (max_seq_len_ - len(_snake_case )) for t in token_ids]
assert len(tk_ ) == len(_snake_case )
assert all(len(_snake_case ) == max_seq_len_ for t in tk_ )
A : Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
A : Any = torch.tensor(_snake_case ) # (bs)
return tk_t, lg_t
| 708 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_:Dict = """▁"""
SCREAMING_SNAKE_CASE_:Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = BigBirdTokenizer
__lowerCamelCase : int = BigBirdTokenizerFast
__lowerCamelCase : int = True
__lowerCamelCase : List[Any] = True
def _lowerCAmelCase ( self ):
super().setUp()
A : Optional[int] = self.tokenizer_class(lowerCamelCase__, keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Optional[int] = """<s>"""
A : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ), lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<unk>""" )
self.assertEqual(vocab_keys[1], """<s>""" )
self.assertEqual(vocab_keys[-1], """[MASK]""" )
self.assertEqual(len(lowerCamelCase__ ), 1004 )
def _lowerCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size, 1000 )
def _lowerCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
A : Tuple = self.get_tokenizer()
A : Optional[int] = self.get_rust_tokenizer()
A : Tuple = """I was born in 92000, and this is falsé."""
A : Optional[Any] = tokenizer.tokenize(lowerCamelCase__ )
A : int = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
A : Tuple = tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
A : List[str] = rust_tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
A : Union[str, Any] = self.get_rust_tokenizer()
A : Tuple = tokenizer.encode(lowerCamelCase__ )
A : int = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = BigBirdTokenizer(lowerCamelCase__, keep_accents=lowerCamelCase__ )
A : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ), [285, 46, 10, 170, 382], )
A : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase__, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
A : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
A : List[str] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
@cached_property
def _lowerCAmelCase ( self ):
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def _lowerCAmelCase ( self ):
A : Union[str, Any] = """Hello World!"""
A : Union[str, Any] = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(lowerCamelCase__, self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _lowerCAmelCase ( self ):
A : int = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
A : Dict = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase__, self.big_tokenizer.encode(lowerCamelCase__ ) )
@require_torch
@slow
def _lowerCAmelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
A : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10]
A : Optional[Any] = """ """.join(lowerCamelCase__ )
A : int = self.big_tokenizer.encode_plus(lowerCamelCase__, return_tensors="""pt""", return_token_type_ids=lowerCamelCase__ )
A : int = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence], return_tensors="""pt""", return_token_type_ids=lowerCamelCase__ )
A : Tuple = BigBirdConfig(attention_type="""original_full""" )
A : List[str] = BigBirdModel(lowerCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase__ )
model(**lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : List[Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
A : Optional[Any] = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def _lowerCAmelCase ( self ):
# fmt: off
A : Any = {"""input_ids""": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__, model_name="""google/bigbird-roberta-base""", revision="""215c99f1600e06f83acce68422f2035b2b5c3510""", )
| 520 | 0 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCAmelCase = logging.getLogger(__name__)
def _snake_case ( A , A ) -> List[Any]:
lowerCAmelCase__ = np.argmax(A , axis=1 )
return np.sum(outputs == labels )
def _snake_case ( A ) -> Dict:
with open(A , encoding='''utf_8''' ) as f:
lowerCAmelCase__ = csv.reader(A )
lowerCAmelCase__ = []
next(A ) # skip the first line
for line in tqdm(A ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _snake_case ( A , A , A , A , A , A ) -> int:
lowerCAmelCase__ = []
for dataset in encoded_datasets:
lowerCAmelCase__ = len(A )
lowerCAmelCase__ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase__ = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase__ = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
lowerCAmelCase__ = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(A ):
lowerCAmelCase__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ = with_conta
lowerCAmelCase__ = with_conta
lowerCAmelCase__ = len(A ) - 1
lowerCAmelCase__ = len(A ) - 1
lowerCAmelCase__ = with_conta
lowerCAmelCase__ = with_conta
lowerCAmelCase__ = mc_label
lowerCAmelCase__ = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(A ) for t in all_inputs ) )
return tensor_datasets
def _snake_case ( ) -> List[str]:
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=A , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=A , type=A , required=A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=A , default='''''' )
parser.add_argument('''--eval_dataset''' , type=A , default='''''' )
parser.add_argument('''--seed''' , type=A , default=42 )
parser.add_argument('''--num_train_epochs''' , type=A , default=3 )
parser.add_argument('''--train_batch_size''' , type=A , default=8 )
parser.add_argument('''--eval_batch_size''' , type=A , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=A , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=A , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=A , default=6.25E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=A , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=A , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=A , default=0.01 )
parser.add_argument('''--lm_coef''' , type=A , default=0.9 )
parser.add_argument('''--n_valid''' , type=A , default=374 )
parser.add_argument('''--server_ip''' , type=A , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=A , default='''''' , help='''Can be used for distant debugging.''' )
lowerCAmelCase__ = parser.parse_args()
print(A )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowerCAmelCase__ = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(A , A ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(A )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(A )
lowerCAmelCase__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(A ) )
model.to(A )
# Load and encode the datasets
def tokenize_and_encode(A ):
if isinstance(A , A ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(A ) )
elif isinstance(A , A ):
return obj
return [tokenize_and_encode(A ) for o in obj]
logger.info('''Encoding dataset...''' )
lowerCAmelCase__ = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ = (train_dataset, eval_dataset)
lowerCAmelCase__ = tokenize_and_encode(A )
# Compute the max input length for the Transformer
lowerCAmelCase__ = model.config.n_positions // 2 - 2
lowerCAmelCase__ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase__ = min(A , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ = pre_process_datasets(A , A , A , *A )
lowerCAmelCase__ = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ = TensorDataset(*A )
lowerCAmelCase__ = RandomSampler(A )
lowerCAmelCase__ = DataLoader(A , sampler=A , batch_size=args.train_batch_size )
lowerCAmelCase__ = TensorDataset(*A )
lowerCAmelCase__ = SequentialSampler(A )
lowerCAmelCase__ = DataLoader(A , sampler=A , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ = args.max_steps
lowerCAmelCase__ = args.max_steps // (len(A ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ = len(A ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ = list(model.named_parameters() )
lowerCAmelCase__ = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
lowerCAmelCase__ = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
lowerCAmelCase__ = AdamW(A , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ = get_linear_schedule_with_warmup(
A , num_warmup_steps=args.warmup_steps , num_training_steps=A )
if args.do_train:
lowerCAmelCase__ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = tqdm(A , desc='''Training''' )
for step, batch in enumerate(A ):
lowerCAmelCase__ = tuple(t.to(A ) for t in batch )
lowerCAmelCase__ = batch
lowerCAmelCase__ = model(A , mc_token_ids=A , lm_labels=A , mc_labels=A )
lowerCAmelCase__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase__ = 'Training loss: {:.2e} lr: {:.2e}'.format(A , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ = model.module if hasattr(A , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase__ = os.path.join(args.output_dir , A )
lowerCAmelCase__ = os.path.join(args.output_dir , A )
torch.save(model_to_save.state_dict() , A )
model_to_save.config.to_json_file(A )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(A )
if args.do_eval:
model.eval()
lowerCAmelCase__ = 0, 0
lowerCAmelCase__ = 0, 0
for batch in tqdm(A , desc='''Evaluating''' ):
lowerCAmelCase__ = tuple(t.to(A ) for t in batch )
lowerCAmelCase__ = batch
with torch.no_grad():
lowerCAmelCase__ = model(
A , mc_token_ids=A , lm_labels=A , mc_labels=A )
lowerCAmelCase__ = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ = mc_labels.to('''cpu''' ).numpy()
lowerCAmelCase__ = accuracy(A , A )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ = eval_loss / nb_eval_steps
lowerCAmelCase__ = eval_accuracy / nb_eval_examples
lowerCAmelCase__ = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
lowerCAmelCase__ = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main() | 90 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
SCREAMING_SNAKE_CASE_ : int = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : Optional[int] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Optional[int] = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : List[str] = {'text': 'string'}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
SCREAMING_SNAKE_CASE_ : Dict = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : str = {'text': 'string'}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : int = [text_path]
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : Optional[int] = {'text': 'string'}
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ) -> Dict:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
SCREAMING_SNAKE_CASE_ : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : Tuple = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : int = TextDatasetReader({'train': text_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / 'cache'
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'text': 'string'}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Dict = TextDatasetReader({'train': text_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
if split:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : str = 'train'
SCREAMING_SNAKE_CASE_ : int = {'train': text_path, 'test': text_path}
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : Tuple = {'text': 'string'}
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 345 | 0 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
_UpperCamelCase = {
'allenai/led-base-16384': 1_6384,
}
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Dict = VOCAB_FILES_NAMES
__snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = LEDTokenizer
__snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self :List[str] , __lowercase :List[Any]=None , __lowercase :List[str]=None , __lowercase :int=None , __lowercase :List[str]="replace" , __lowercase :str="<s>" , __lowercase :Tuple="</s>" , __lowercase :List[Any]="</s>" , __lowercase :Tuple="<s>" , __lowercase :Any="<unk>" , __lowercase :Dict="<pad>" , __lowercase :str="<mask>" , __lowercase :Union[str, Any]=False , __lowercase :Union[str, Any]=True , **__lowercase :int , ):
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase , **__lowercase , )
__lowerCamelCase : str =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space:
__lowerCamelCase : List[Any] =getattr(__lowercase , pre_tok_state.pop('''type''' ) )
__lowerCamelCase : Dict =add_prefix_space
__lowerCamelCase : Dict =pre_tok_class(**__lowercase )
__lowerCamelCase : Dict =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCamelCase : Any ='''post_processor'''
__lowerCamelCase : Tuple =getattr(self.backend_tokenizer , __lowercase , __lowercase )
if tokenizer_component_instance:
__lowerCamelCase : List[Any] =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCamelCase : Optional[int] =tuple(state['''sep'''] )
if "cls" in state:
__lowerCamelCase : Any =tuple(state['''cls'''] )
__lowerCamelCase : int =False
if state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space:
__lowerCamelCase : List[str] =add_prefix_space
__lowerCamelCase : int =True
if state.get('''trim_offsets''' , __lowercase ) != trim_offsets:
__lowerCamelCase : List[str] =trim_offsets
__lowerCamelCase : Union[str, Any] =True
if changes_to_apply:
__lowerCamelCase : int =getattr(__lowercase , state.pop('''type''' ) )
__lowerCamelCase : Any =component_class(**__lowercase )
setattr(self.backend_tokenizer , __lowercase , __lowercase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowercase ( self :str ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowercase ( self :List[Any] , __lowercase :Any ):
__lowerCamelCase : Any =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else value
__lowerCamelCase : Tuple =value
def __lowercase ( self :List[Any] , *__lowercase :int , **__lowercase :Union[str, Any] ):
__lowerCamelCase : List[str] =kwargs.get('''is_split_into_words''' , __lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowercase , **__lowercase )
def __lowercase ( self :List[str] , *__lowercase :Any , **__lowercase :str ):
__lowerCamelCase : Tuple =kwargs.get('''is_split_into_words''' , __lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowercase , **__lowercase )
def __lowercase ( self :int , __lowercase :str , __lowercase :Optional[str] = None ):
__lowerCamelCase : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
def __lowercase ( self :Dict , __lowercase :str , __lowercase :Optional[Any]=None ):
__lowerCamelCase : Any =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowercase ( self :Tuple , __lowercase :List[int] , __lowercase :Optional[List[int]] = None ):
__lowerCamelCase : List[Any] =[self.sep_token_id]
__lowerCamelCase : Union[str, Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self :Union[str, Any] , __lowercase :Union[Dict[str, EncodedInput], BatchEncoding] , __lowercase :Optional[int] = None , __lowercase :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowercase :Optional[int] = None , __lowercase :Optional[bool] = None , ):
__lowerCamelCase : Optional[Any] =super()._pad(
encoded_inputs=__lowercase , max_length=__lowercase , padding_strategy=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , )
# Load from model defaults
if return_attention_mask is None:
__lowerCamelCase : Dict ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowerCamelCase : List[str] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowerCamelCase : Any =len(encoded_inputs['''global_attention_mask'''] ) != len(__lowercase )
if needs_to_be_padded:
__lowerCamelCase : List[Any] =len(__lowercase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowerCamelCase : Dict =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__lowerCamelCase : Tuple =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 363 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :List[str] , __lowercase :Any , __lowercase :str=100 , __lowercase :Dict=13 , __lowercase :Tuple=30 , __lowercase :Tuple=2 , __lowercase :Dict=3 , __lowercase :Optional[Any]=True , __lowercase :int=True , __lowercase :List[Any]=32 , __lowercase :str=4 , __lowercase :int=4 , __lowercase :List[str]=37 , __lowercase :Dict="gelu" , __lowercase :List[Any]=0.1 , __lowercase :Tuple=0.1 , __lowercase :Union[str, Any]=10 , __lowercase :Any=0.02 , __lowercase :Tuple=3 , __lowercase :Any=None , __lowercase :Optional[int]=[0, 1, 2, 3] , ):
__lowerCamelCase : int =parent
__lowerCamelCase : Optional[Any] =100
__lowerCamelCase : Optional[int] =batch_size
__lowerCamelCase : Tuple =image_size
__lowerCamelCase : Tuple =patch_size
__lowerCamelCase : Optional[Any] =num_channels
__lowerCamelCase : Union[str, Any] =is_training
__lowerCamelCase : int =use_labels
__lowerCamelCase : Optional[int] =hidden_size
__lowerCamelCase : Optional[Any] =num_hidden_layers
__lowerCamelCase : List[Any] =num_attention_heads
__lowerCamelCase : Any =intermediate_size
__lowerCamelCase : int =hidden_act
__lowerCamelCase : Tuple =hidden_dropout_prob
__lowerCamelCase : List[Any] =attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] =type_sequence_label_size
__lowerCamelCase : List[str] =initializer_range
__lowerCamelCase : Tuple =scope
__lowerCamelCase : Union[str, Any] =out_indices
__lowerCamelCase : Tuple =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : Optional[int] =(image_size // patch_size) ** 2
__lowerCamelCase : Optional[Any] =num_patches + 1
def __lowercase ( self :int ):
__lowerCamelCase : Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Any =None
__lowerCamelCase : str =None
if self.use_labels:
__lowerCamelCase : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : List[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase : List[str] =self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowercase ( self :Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __lowercase ( self :Any , __lowercase :List[Any] , __lowercase :str , __lowercase :Union[str, Any] , __lowercase :Dict ):
__lowerCamelCase : Tuple =BeitModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : int =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self :Tuple , __lowercase :Dict , __lowercase :Tuple , __lowercase :Optional[Any] , __lowercase :Any ):
__lowerCamelCase : List[str] =BeitForMaskedImageModeling(config=__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : int =model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowercase ( self :Dict , __lowercase :str , __lowercase :Tuple , __lowercase :int , __lowercase :str ):
__lowerCamelCase : int =self.type_sequence_label_size
__lowerCamelCase : Optional[Any] =BeitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : Optional[Any] =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : int =1
__lowerCamelCase : str =BeitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : Tuple =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : str =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self :List[Any] , __lowercase :Dict , __lowercase :List[str] , __lowercase :int , __lowercase :str ):
__lowerCamelCase : Union[str, Any] =self.num_labels
__lowerCamelCase : Any =BeitForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : Any =model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__lowerCamelCase : Optional[Any] =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Optional[int] =self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str =config_and_inputs
__lowerCamelCase : List[Any] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__snake_case : List[Any] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__snake_case : Any = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__snake_case : Union[str, Any] = False
__snake_case : Optional[Any] = False
__snake_case : str = False
def __lowercase ( self :Tuple ):
__lowerCamelCase : Optional[Any] =BeitModelTester(self )
__lowerCamelCase : Optional[int] =ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __lowercase ( self :Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __lowercase ( self :List[str] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowercase ( self :Union[str, Any] ):
pass
def __lowercase ( self :Any ):
__lowerCamelCase , __lowerCamelCase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[int] =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase : Union[str, Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __lowercase ( self :Any ):
__lowerCamelCase , __lowerCamelCase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Tuple =model_class(__lowercase )
__lowerCamelCase : Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Tuple =[*signature.parameters.keys()]
__lowerCamelCase : List[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __lowercase ( self :str ):
__lowerCamelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __lowercase ( self :str ):
__lowerCamelCase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def __lowercase ( self :Dict ):
__lowerCamelCase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def __lowercase ( self :Tuple ):
__lowerCamelCase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
def __lowercase ( self :str ):
if not self.model_tester.is_training:
return
__lowerCamelCase , __lowerCamelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[Any] =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__lowercase ), BeitForMaskedImageModeling]:
continue
__lowerCamelCase : Any =model_class(__lowercase )
model.to(__lowercase )
model.train()
__lowerCamelCase : Dict =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__lowerCamelCase : List[Any] =model(**__lowercase ).loss
loss.backward()
def __lowercase ( self :List[Any] ):
__lowerCamelCase , __lowerCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCamelCase : str =False
__lowerCamelCase : int =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__lowercase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__lowerCamelCase : Any =model_class(__lowercase )
model.gradient_checkpointing_enable()
model.to(__lowercase )
model.train()
__lowerCamelCase : Any =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__lowerCamelCase : Union[str, Any] =model(**__lowercase ).loss
loss.backward()
def __lowercase ( self :Dict ):
__lowerCamelCase , __lowerCamelCase : str =self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Tuple =_config_zero_init(__lowercase )
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] =model_class(config=__lowercase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __lowercase ( self :int ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : str =BeitModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase_ ( ):
'''simple docstring'''
__lowerCamelCase : Tuple =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self :Optional[Any] ):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowercase ( self :Any ):
__lowerCamelCase : Union[str, Any] =BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__lowercase )
__lowerCamelCase : Union[str, Any] =self.default_image_processor
__lowerCamelCase : List[str] =prepare_img()
__lowerCamelCase : Tuple =image_processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# prepare bool_masked_pos
__lowerCamelCase : Dict =torch.ones((1, 196) , dtype=torch.bool ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : List[Any] =model(pixel_values=__lowercase , bool_masked_pos=__lowercase )
__lowerCamelCase : List[Any] =outputs.logits
# verify the logits
__lowerCamelCase : str =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __lowercase )
__lowerCamelCase : Tuple =torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __lowercase , atol=1e-2 ) )
@slow
def __lowercase ( self :Tuple ):
__lowerCamelCase : Optional[int] =BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__lowercase )
__lowerCamelCase : str =self.default_image_processor
__lowerCamelCase : List[Any] =prepare_img()
__lowerCamelCase : List[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : int =model(**__lowercase )
__lowerCamelCase : List[Any] =outputs.logits
# verify the logits
__lowerCamelCase : Union[str, Any] =torch.Size((1, 1000) )
self.assertEqual(logits.shape , __lowercase )
__lowerCamelCase : Optional[Any] =torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) )
__lowerCamelCase : str =281
self.assertEqual(logits.argmax(-1 ).item() , __lowercase )
@slow
def __lowercase ( self :int ):
__lowerCamelCase : Optional[int] =BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
__lowercase )
__lowerCamelCase : str =self.default_image_processor
__lowerCamelCase : str =prepare_img()
__lowerCamelCase : int =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : str =model(**__lowercase )
__lowerCamelCase : int =outputs.logits
# verify the logits
__lowerCamelCase : Dict =torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __lowercase )
__lowerCamelCase : List[Any] =torch.tensor([1.6881, -0.2787, 0.5901] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) )
__lowerCamelCase : List[Any] =2396
self.assertEqual(logits.argmax(-1 ).item() , __lowercase )
@slow
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Tuple =BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
__lowerCamelCase : int =model.to(__lowercase )
__lowerCamelCase : Tuple =BeitImageProcessor(do_resize=__lowercase , size=640 , do_center_crop=__lowercase )
__lowerCamelCase : Optional[Any] =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__lowerCamelCase : Optional[int] =Image.open(ds[0]['''file'''] )
__lowerCamelCase : Union[str, Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : List[Any] =model(**__lowercase )
__lowerCamelCase : Tuple =outputs.logits
# verify the logits
__lowerCamelCase : Optional[Any] =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __lowercase )
__lowerCamelCase : List[Any] =version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
__lowerCamelCase : Any =torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__lowercase , )
else:
__lowerCamelCase : int =torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1e-4 ) )
@slow
def __lowercase ( self :Any ):
__lowerCamelCase : Any =BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
__lowerCamelCase : Union[str, Any] =model.to(__lowercase )
__lowerCamelCase : List[str] =BeitImageProcessor(do_resize=__lowercase , size=640 , do_center_crop=__lowercase )
__lowerCamelCase : List[str] =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__lowerCamelCase : Optional[Any] =Image.open(ds[0]['''file'''] )
__lowerCamelCase : List[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : Tuple =model(**__lowercase )
__lowerCamelCase : str =outputs.logits.detach().cpu()
__lowerCamelCase : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(500, 300)] )
__lowerCamelCase : Union[str, Any] =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __lowercase )
__lowerCamelCase : Optional[Any] =image_processor.post_process_semantic_segmentation(outputs=__lowercase )
__lowerCamelCase : List[str] =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __lowercase )
| 363 | 1 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# TODO Update this
SCREAMING_SNAKE_CASE_ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A__ : Optional[int] = "esm"
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1026 , snake_case_=0.02 , snake_case_=1e-1_2 , snake_case_="absolute" , snake_case_=True , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=None , snake_case_=None , **snake_case_ , ) -> str:
super().__init__(pad_token_id=snake_case_ , mask_token_id=snake_case_ , **snake_case_ )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = emb_layer_norm_before
_UpperCAmelCase = token_dropout
_UpperCAmelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_UpperCAmelCase = EsmFoldConfig()
elif isinstance(snake_case_ , snake_case_ ):
_UpperCAmelCase = EsmFoldConfig(**snake_case_ )
_UpperCAmelCase = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_UpperCAmelCase = get_default_vocab_list()
else:
_UpperCAmelCase = vocab_list
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , snake_case_ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def __A ( self ) -> Union[str, Any]:
_UpperCAmelCase = super().to_dict()
if isinstance(self.esmfold_config , snake_case_ ):
_UpperCAmelCase = self.esmfold_config.to_dict()
return output
@dataclass
class a :
"""simple docstring"""
A__ : str = None
A__ : bool = True
A__ : bool = False
A__ : bool = False
A__ : bool = False
A__ : float = 0
A__ : bool = True
A__ : bool = False
A__ : int = 128
A__ : "TrunkConfig" = None
def __A ( self ) -> str:
if self.trunk is None:
_UpperCAmelCase = TrunkConfig()
elif isinstance(self.trunk , snake_case_ ):
_UpperCAmelCase = TrunkConfig(**self.trunk )
def __A ( self ) -> List[Any]:
_UpperCAmelCase = asdict(self )
_UpperCAmelCase = self.trunk.to_dict()
return output
@dataclass
class a :
"""simple docstring"""
A__ : int = 48
A__ : int = 1_024
A__ : int = 128
A__ : int = 32
A__ : int = 32
A__ : int = 32
A__ : float = 0
A__ : float = 0
A__ : bool = False
A__ : int = 4
A__ : Optional[int] = 128
A__ : "StructureModuleConfig" = None
def __A ( self ) -> Dict:
if self.structure_module is None:
_UpperCAmelCase = StructureModuleConfig()
elif isinstance(self.structure_module , snake_case_ ):
_UpperCAmelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
_UpperCAmelCase = self.sequence_state_dim // self.sequence_head_width
_UpperCAmelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self ) -> Dict:
_UpperCAmelCase = asdict(self )
_UpperCAmelCase = self.structure_module.to_dict()
return output
@dataclass
class a :
"""simple docstring"""
A__ : int = 384
A__ : int = 128
A__ : int = 16
A__ : int = 128
A__ : int = 12
A__ : int = 4
A__ : int = 8
A__ : float = 0.1
A__ : int = 8
A__ : int = 1
A__ : int = 2
A__ : int = 7
A__ : int = 10
A__ : float = 1e-8
A__ : float = 1e5
def __A ( self ) -> Tuple:
return asdict(self )
def A__ ( ) -> str:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 426 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def A__ ( A__ ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = image.size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_UpperCAmelCase = np.array(A__ ).astype(np.floataa ) / 255.0
_UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.from_numpy(A__ )
return 2.0 * image - 1.0
class a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ , ) -> int:
super().__init__()
self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self , snake_case_ = None , snake_case_ = 1 , snake_case_ = 100 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}""" )
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = preprocess(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
_UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(snake_case_ , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(snake_case_ ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample
_UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
| 426 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__A )] )
UpperCamelCase__ = np.array(__A )
UpperCamelCase__ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __A ) ) , x.transpose() ) , __A )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _UpperCamelCase ( __A , __A , __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = (1, 2, 1)
UpperCamelCase__ = (1, 1, 0, 7)
UpperCamelCase__ = SARIMAX(
__A , exog=__A , order=__A , seasonal_order=__A )
UpperCamelCase__ = model.fit(disp=__A , maxiter=600 , method="nm" )
UpperCamelCase__ = model_fit.predict(1 , len(__A ) , exog=[test_match] )
return result[0]
def _UpperCamelCase ( __A , __A , __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__A , __A )
UpperCamelCase__ = regressor.predict(__A )
return y_pred[0]
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
train_user.sort()
UpperCamelCase__ = np.percentile(__A , 25 )
UpperCamelCase__ = np.percentile(__A , 75 )
UpperCamelCase__ = qa - qa
UpperCamelCase__ = qa - (iqr * 0.1)
return low_lim
def _UpperCamelCase ( __A , __A ) -> bool:
'''simple docstring'''
UpperCamelCase__ = 0
UpperCamelCase__ = 0
for i in list_vote:
if i > actual_result:
UpperCamelCase__ = not_safe + 1
else:
if abs(abs(__A ) - abs(__A ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
a__ : Union[str, Any] = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
a__ : Union[str, Any] = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
a__ : int = Normalizer().fit_transform(data_input_df.values)
# split data
a__ : List[str] = normalize_df[:, 2].tolist()
a__ : List[Any] = normalize_df[:, 0].tolist()
a__ : int = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
a__ : str = normalize_df[:, [1, 2]].tolist()
a__ : Optional[int] = x[: len(x) - 1]
a__ : Optional[int] = x[len(x) - 1 :]
# for linear regression & sarimax
a__ : Optional[int] = total_date[: len(total_date) - 1]
a__ : Optional[int] = total_user[: len(total_user) - 1]
a__ : Dict = total_match[: len(total_match) - 1]
a__ : str = total_date[len(total_date) - 1 :]
a__ : Union[str, Any] = total_user[len(total_user) - 1 :]
a__ : Optional[int] = total_match[len(total_match) - 1 :]
# voting system with forecasting
a__ : Dict = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
a__ : Dict = '' if data_safety_checker(res_vote, tst_user) else 'not '
print('Today\'s data is {not_str}safe.')
| 223 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowercase_ :
__UpperCAmelCase = 42
__UpperCAmelCase = None
__UpperCAmelCase = None
def _UpperCamelCase ( ) -> Node | None:
'''simple docstring'''
UpperCamelCase__ = Node(1 )
UpperCamelCase__ = Node(2 )
UpperCamelCase__ = Node(3 )
UpperCamelCase__ = Node(4 )
UpperCamelCase__ = Node(5 )
return tree
def _UpperCamelCase ( __A ) -> list[int]:
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _UpperCamelCase ( __A ) -> list[int]:
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _UpperCamelCase ( __A ) -> list[int]:
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _UpperCamelCase ( __A ) -> Sequence[Node | None]:
'''simple docstring'''
UpperCamelCase__ = []
if root is None:
return output
UpperCamelCase__ = deque([root] )
while process_queue:
UpperCamelCase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def _UpperCamelCase ( __A , __A ) -> Sequence[Node | None]:
'''simple docstring'''
UpperCamelCase__ = []
def populate_output(__A , __A ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__A , __A )
return output
def _UpperCamelCase ( __A , __A ) -> Sequence[Node | None]:
'''simple docstring'''
UpperCamelCase__ = []
def populate_output(__A , __A ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__A , __A )
return output
def _UpperCamelCase ( __A ) -> Sequence[Node | None] | list[Any]:
'''simple docstring'''
if root is None:
return []
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = height(__A )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__A , __A ) )
UpperCamelCase__ = 1
else:
output.append(get_nodes_from_right_to_left(__A , __A ) )
UpperCamelCase__ = 0
return output
def _UpperCamelCase ( ) -> None: # Main function for testing.
'''simple docstring'''
UpperCamelCase__ = make_tree()
print(F'''In-order Traversal: {inorder(__A )}''' )
print(F'''Pre-order Traversal: {preorder(__A )}''' )
print(F'''Post-order Traversal: {postorder(__A )}''' , "\n" )
print(F'''Height of Tree: {height(__A )}''' , "\n" )
print("Complete Level Order Traversal: " )
print(level_order(__A ) , "\n" )
print("Level-wise order Traversal: " )
for level in range(1 , height(__A ) + 1 ):
print(F'''Level {level}:''' , get_nodes_from_left_to_right(__A , level=__A ) )
print("\nZigZag order Traversal: " )
print(zigzag(__A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 223 | 1 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 453 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class __a ( __magic_name__ ):
"""simple docstring"""
def __init__( self , *snake_case , **snake_case ):
"""simple docstring"""
super().__init__(*snake_case , **snake_case )
self.check_model_type(snake_case )
def SCREAMING_SNAKE_CASE_ ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = {}, {}
if padding is not None:
lowerCAmelCase__ : Dict = padding
if truncation is not None:
lowerCAmelCase__ : Tuple = truncation
if top_k is not None:
lowerCAmelCase__ : str = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , snake_case , snake_case = None , **snake_case ):
"""simple docstring"""
if isinstance(snake_case , (Image.Image, str) ) and isinstance(snake_case , snake_case ):
lowerCAmelCase__ : int = {"image": image, "question": question}
else:
lowerCAmelCase__ : Optional[int] = image
lowerCAmelCase__ : Optional[Any] = super().__call__(snake_case , **snake_case )
return results
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case=False , snake_case=False ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = load_image(inputs["image"] )
lowerCAmelCase__ : str = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=snake_case , truncation=snake_case )
lowerCAmelCase__ : int = self.image_processor(images=snake_case , return_tensors=self.framework )
model_inputs.update(snake_case )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.model(**snake_case )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowerCAmelCase__ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase__ : Optional[int] = model_outputs.logits.sigmoid()[0]
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = probs.topk(snake_case )
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
lowerCAmelCase__ : int = scores.tolist()
lowerCAmelCase__ : str = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case , snake_case )]
| 453 | 1 |
import math
def lowerCamelCase_ ( lowerCAmelCase: int )-> list[int]:
_snake_case : str = []
_snake_case : Optional[int] = 2
_snake_case : int = int(math.sqrt(lowerCAmelCase ) ) # Size of every segment
_snake_case : List[Any] = [True] * (end + 1)
_snake_case : Dict = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase )
for i in range(start * start , end + 1 , lowerCAmelCase ):
_snake_case : int = False
start += 1
prime += in_prime
_snake_case : Any = end + 1
_snake_case : int = min(2 * end , lowerCAmelCase )
while low <= n:
_snake_case : List[str] = [True] * (high - low + 1)
for each in in_prime:
_snake_case : List[Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase , high + 1 , lowerCAmelCase ):
_snake_case : Tuple = False
for j in range(len(lowerCAmelCase ) ):
if temp[j] is True:
prime.append(j + low )
_snake_case : List[Any] = high + 1
_snake_case : Tuple = min(high + end , lowerCAmelCase )
return prime
print(sieve(10**6))
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 1 |
'''simple docstring'''
class UpperCAmelCase :
def __init__( self : str , __snake_case : list ) -> None:
_lowerCAmelCase = set_counts
_lowerCAmelCase = max(__snake_case )
_lowerCAmelCase = len(__snake_case )
_lowerCAmelCase = [1] * num_sets
_lowerCAmelCase = list(range(__snake_case ) )
def lowercase__ ( self : Dict , __snake_case : int , __snake_case : int ) -> bool:
_lowerCAmelCase = self.get_parent(__snake_case )
_lowerCAmelCase = self.get_parent(__snake_case )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
_lowerCAmelCase = 0
_lowerCAmelCase = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
_lowerCAmelCase = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
_lowerCAmelCase = 0
_lowerCAmelCase = src_parent
_lowerCAmelCase = self.set_counts[src_parent]
_lowerCAmelCase = max(self.max_set , __snake_case )
return True
def lowercase__ ( self : int , __snake_case : int ) -> int:
if self.parents[disj_set] == disj_set:
return disj_set
_lowerCAmelCase = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 207 |
'''simple docstring'''
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_lowerCAmelCase = 1
_lowerCAmelCase = 1
while repunit:
_lowerCAmelCase = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def UpperCamelCase__ ( lowerCAmelCase = 1_00_00_00 ):
"""simple docstring"""
_lowerCAmelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(lowerCAmelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 207 | 1 |
from __future__ import annotations
from typing import Any
class lowerCamelCase_ :
def __init__( self , _SCREAMING_SNAKE_CASE ):
a_ = num_of_nodes
a_ = []
a_ = {}
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.m_edges.append([u_node, v_node, weight] )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
a_ = self.find_component(UpperCAmelCase_ )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if component_size[u_node] <= component_size[v_node]:
a_ = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
a_ = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def __magic_name__ ( self ):
a_ = []
a_ = 0
a_ = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
a_ = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
a_ , a_ , a_ = edge
a_ = self.m_component[u]
a_ = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
a_ = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
a_ , a_ , a_ = edge
a_ = self.m_component[u]
a_ = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
a_ = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def __SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 711 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : int = 10**9 ) -> int:
"""simple docstring"""
a_ = 1
a_ = 2
a_ = 0
a_ = 0
a_ = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
a_ = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'{solution() = }') | 403 | 0 |
import argparse
from collections import defaultdict
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : Union[str, Any] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(UpperCamelCase__ , '''r''' ) as f:
UpperCamelCase__ : Tuple = f.readlines()
UpperCamelCase__ : int = f'''class {class_name}('''
UpperCamelCase__ : str = f'''{4 * " "}def {test_name}('''
UpperCamelCase__ : List[Any] = f'''{8 * " "}{correct_line.split()[0]}'''
UpperCamelCase__ : int = f'''{1_6 * " "}{correct_line.split()[0]}'''
UpperCamelCase__ : Union[str, Any] = False
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : Union[str, Any] = False
UpperCamelCase__ : str = False
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : Tuple = []
for line in lines:
if line.startswith(UpperCamelCase__ ):
UpperCamelCase__ : Tuple = True
elif in_class and line.startswith(UpperCamelCase__ ):
UpperCamelCase__ : Dict = True
elif in_class and in_func and (line.startswith(UpperCamelCase__ ) or line.startswith(UpperCamelCase__ )):
UpperCamelCase__ : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCamelCase__ : str = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCamelCase__ : str = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
UpperCamelCase__ : Optional[int] = False
else:
new_lines.append(UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' ) as f:
for line in new_lines:
f.write(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__=None ):
if fail is not None:
with open(UpperCamelCase__ , '''r''' ) as f:
UpperCamelCase__ : int = {l.strip() for l in f.readlines()}
else:
UpperCamelCase__ : Tuple = None
with open(UpperCamelCase__ , '''r''' ) as f:
UpperCamelCase__ : Optional[int] = f.readlines()
UpperCamelCase__ : str = defaultdict(UpperCamelCase__ )
for line in correct_lines:
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Dict = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
lowerCamelCase =argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
lowerCamelCase =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 285 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase =logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 3_2 , __SCREAMING_SNAKE_CASE=PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
UpperCamelCase__ : Dict = do_resize
UpperCamelCase__ : Tuple = do_rescale
UpperCamelCase__ : Dict = size_divisor
UpperCamelCase__ : str = resample
super().__init__(**__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = get_image_size(__SCREAMING_SNAKE_CASE )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCamelCase__ : List[Any] = height // size_divisor * size_divisor
UpperCamelCase__ : Dict = width // size_divisor * size_divisor
UpperCamelCase__ : str = resize(__SCREAMING_SNAKE_CASE , (new_h, new_w) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return image
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ) -> BatchFeature:
"""simple docstring"""
UpperCamelCase__ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ : Union[str, Any] = size_divisor if size_divisor is not None else self.size_divisor
UpperCamelCase__ : List[str] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCamelCase__ : List[Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCamelCase__ : Dict = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for img in images]
if do_resize:
UpperCamelCase__ : Optional[int] = [self.resize(__SCREAMING_SNAKE_CASE , size_divisor=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCamelCase__ : Union[str, Any] = [self.rescale(__SCREAMING_SNAKE_CASE , scale=1 / 2_5_5 ) for image in images]
UpperCamelCase__ : List[str] = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 285 | 1 |
'''simple docstring'''
import os
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[Any] = len(grid[0] )
lowercase_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
lowercase_ : Union[str, Any] = 0
lowercase_ : int = 0
lowercase_ : Union[str, Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(n_rows - 3 ):
lowercase_ : Dict = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowercase_ : Optional[Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowercase_ : Tuple = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
lowercase_ : int = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowercase_ : Optional[int] = max(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if max_product > largest:
lowercase_ : List[Any] = max_product
return largest
def _UpperCamelCase ( ):
lowercase_ : int = []
with open(os.path.dirname(SCREAMING_SNAKE_CASE_ ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
lowercase_ : Dict = [[int(SCREAMING_SNAKE_CASE_ ) for i in grid[j]] for j in range(len(SCREAMING_SNAKE_CASE_ ) )]
return largest_product(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(solution())
| 438 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__(self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , ) -> int:
lowercase_ : List[str] = size if size is not None else {'height': 18, 'width': 18}
lowercase_ : Dict = parent
lowercase_ : Optional[Any] = batch_size
lowercase_ : int = num_channels
lowercase_ : List[Any] = image_size
lowercase_ : Dict = min_resolution
lowercase_ : List[Any] = max_resolution
lowercase_ : Optional[Any] = do_resize
lowercase_ : int = size
lowercase_ : Optional[int] = apply_ocr
def _lowerCamelCase (self ) -> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( _snake_case , unittest.TestCase ):
"""simple docstring"""
A : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _lowerCamelCase (self ) -> Optional[int]:
lowercase_ : str = LayoutLMvaImageProcessingTester(self )
@property
def _lowerCamelCase (self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase (self ) -> int:
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , 'do_resize' ) )
self.assertTrue(hasattr(_a , 'size' ) )
self.assertTrue(hasattr(_a , 'apply_ocr' ) )
def _lowerCamelCase (self ) -> Tuple:
lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
lowercase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _lowerCamelCase (self ) -> int:
pass
def _lowerCamelCase (self ) -> Dict:
# Initialize image_processing
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
lowercase_ : Any = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _a )
self.assertIsInstance(encoding.boxes , _a )
# Test batched
lowercase_ : Union[str, Any] = image_processing(_a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase (self ) -> Tuple:
# Initialize image_processing
lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase_ : str = image_processing(_a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase (self ) -> Union[str, Any]:
# Initialize image_processing
lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase_ : str = image_processing(_a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase (self ) -> str:
# with apply_OCR = True
lowercase_ : Optional[int] = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase_ : List[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
lowercase_ : Union[str, Any] = Image.open(ds[0]['file'] ).convert('RGB' )
lowercase_ : List[str] = image_processing(_a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase_ : str = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowercase_ : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _a )
self.assertListEqual(encoding.boxes , _a )
# with apply_OCR = False
lowercase_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=_a )
lowercase_ : Dict = image_processing(_a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 438 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 196 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=[8, 16, 32, 64] , SCREAMING_SNAKE_CASE__=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE__=[2, 3, 4] , SCREAMING_SNAKE_CASE__=1 , ):
'''simple docstring'''
snake_case: Dict = parent
snake_case: List[Any] = batch_size
snake_case: Any = image_size
snake_case: Optional[Any] = num_channels
snake_case: List[Any] = embeddings_size
snake_case: Tuple = hidden_sizes
snake_case: str = depths
snake_case: str = is_training
snake_case: List[str] = use_labels
snake_case: Tuple = hidden_act
snake_case: List[str] = num_labels
snake_case: Optional[int] = scope
snake_case: Any = len(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = out_features
snake_case: Optional[Any] = out_indices
snake_case: str = num_groups
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case: str = None
if self.use_labels:
snake_case: Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
snake_case: Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = BitModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = self.num_labels
snake_case: int = BitForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: List[Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = BitBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: int = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case: str = None
snake_case: int = BitBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: str = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case: str = config_and_inputs
snake_case: int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__UpperCamelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = BitModelTester(self )
snake_case: str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Bit does not output attentions' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case , snake_case: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case: List[str] = [*signature.parameters.keys()]
snake_case: Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case , snake_case: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Optional[int] = model_class(config=SCREAMING_SNAKE_CASE__ )
for name, module in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def _UpperCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
snake_case: Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case: Optional[int] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case , snake_case: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: Optional[int] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case: Tuple = layer_type
snake_case: str = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case: Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case: Union[str, Any] = BitModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.default_image_processor
snake_case: List[Any] = prepare_img()
snake_case: List[str] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
snake_case: Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
snake_case: List[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (BitBackbone,) if is_torch_available() else ()
__UpperCamelCase = BitConfig
__UpperCamelCase = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = BitModelTester(self ) | 329 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ComputeEnvironment.AMAZON_SAGEMAKER
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Any = "ml.p3.2xlarge"
SCREAMING_SNAKE_CASE_ : int = "accelerate_sagemaker_execution_role"
SCREAMING_SNAKE_CASE_ : Any = "hf-sm"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "us-east-1"
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : List[str] = "accelerate-sagemaker-1"
SCREAMING_SNAKE_CASE_ : List[str] = "1.6"
SCREAMING_SNAKE_CASE_ : int = "4.4"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "train.py"
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
SCREAMING_SNAKE_CASE_ : str = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
_lowercase : Optional[Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] ,UpperCAmelCase_ )
assert isinstance(converted_args["""do_train"""] ,UpperCAmelCase_ )
assert isinstance(converted_args["""epochs"""] ,UpperCAmelCase_ )
assert isinstance(converted_args["""learning_rate"""] ,UpperCAmelCase_ )
assert isinstance(converted_args["""max_steps"""] ,UpperCAmelCase_ )
with pytest.raises(UpperCAmelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 704 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=2 ,UpperCAmelCase_=8 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=99 ,UpperCAmelCase_=16 ,UpperCAmelCase_=5 ,UpperCAmelCase_=2 ,UpperCAmelCase_=36 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,UpperCAmelCase_=4 ,UpperCAmelCase_=None ,):
_lowercase : Tuple = parent
_lowercase : Dict = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Any = is_training
_lowercase : List[str] = use_input_mask
_lowercase : Any = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : Tuple = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Tuple = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : List[str] = intermediate_size
_lowercase : Optional[int] = hidden_act
_lowercase : Any = hidden_dropout_prob
_lowercase : int = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Dict = num_labels
_lowercase : int = num_choices
_lowercase : int = scope
def lowerCamelCase__ ( self ):
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowercase : List[str] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Dict = None
if self.use_token_type_ids:
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Optional[int] = None
_lowercase : Tuple = None
if self.use_labels:
_lowercase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowercase : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
_lowercase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ):
return MraConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range ,)
def lowerCamelCase__ ( self ):
_lowercase : Any = self.get_config()
_lowercase : Union[str, Any] = 3_00
return config
def lowerCamelCase__ ( self ):
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : List[str] = self.prepare_config_and_inputs()
_lowercase : int = True
_lowercase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = MraModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : str = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ )
_lowercase : Union[str, Any] = model(UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ )
_lowercase : List[str] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : Union[str, Any] = True
_lowercase : Optional[int] = MraModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : Dict = model(
UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,encoder_hidden_states=UpperCAmelCase_ ,encoder_attention_mask=UpperCAmelCase_ ,)
_lowercase : Dict = model(
UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,encoder_hidden_states=UpperCAmelCase_ ,)
_lowercase : Dict = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : int = MraForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : Dict = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Dict = MraForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : str = model(
UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,start_positions=UpperCAmelCase_ ,end_positions=UpperCAmelCase_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = self.num_labels
_lowercase : Optional[int] = MraForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : Optional[Any] = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : str = self.num_labels
_lowercase : List[str] = MraForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : Optional[int] = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.num_choices
_lowercase : List[str] = MraForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : str = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowercase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowercase : List[str] = model(
UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ):
_lowercase : str = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Optional[int] = config_and_inputs
_lowercase : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = ()
def lowerCamelCase__ ( self ):
_lowercase : int = MraModelTester(self )
_lowercase : List[str] = ConfigTester(self ,config_class=UpperCAmelCase_ ,hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase : List[str] = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowerCamelCase__ ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Any = MraModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip(reason="""MRA does not output attentions""" )
def lowerCamelCase__ ( self ):
return
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self ):
_lowercase : int = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
_lowercase : List[Any] = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : List[Any] = model(UpperCAmelCase_ )[0]
_lowercase : List[Any] = torch.Size((1, 2_56, 7_68) )
self.assertEqual(output.shape ,UpperCAmelCase_ )
_lowercase : Optional[int] = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
_lowercase : Optional[int] = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : Optional[Any] = model(UpperCAmelCase_ )[0]
_lowercase : Tuple = 5_02_65
_lowercase : Tuple = torch.Size((1, 2_56, vocab_size) )
self.assertEqual(output.shape ,UpperCAmelCase_ )
_lowercase : Any = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self ):
_lowercase : Tuple = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
_lowercase : int = torch.arange(40_96 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : Optional[Any] = model(UpperCAmelCase_ )[0]
_lowercase : Tuple = 5_02_65
_lowercase : str = torch.Size((1, 40_96, vocab_size) )
self.assertEqual(output.shape ,UpperCAmelCase_ )
_lowercase : str = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
| 600 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : str , __A : int , __A : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase : Tuple =UniSpeechSatForSequenceClassification.from_pretrained(_a , config=_a )
lowercase : Any =downstream_dict["""projector.weight"""]
lowercase : Tuple =downstream_dict["""projector.bias"""]
lowercase : Any =downstream_dict["""model.post_net.linear.weight"""]
lowercase : Union[str, Any] =downstream_dict["""model.post_net.linear.bias"""]
return model
def lowercase_ ( __A : List[str] , __A : Dict , __A : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : str =UniSpeechSatForAudioFrameClassification.from_pretrained(_a , config=_a )
lowercase : Optional[Any] =downstream_dict["""model.linear.weight"""]
lowercase : Union[str, Any] =downstream_dict["""model.linear.bias"""]
return model
def lowercase_ ( __A : Optional[Any] , __A : Optional[Any] , __A : Any ) -> Any:
"""simple docstring"""
lowercase : int =UniSpeechSatForXVector.from_pretrained(_a , config=_a )
lowercase : Optional[int] =downstream_dict["""connector.weight"""]
lowercase : str =downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowercase : Any =downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
lowercase : int =downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
lowercase : List[str] =downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
lowercase : Any =downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
lowercase : int =downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
lowercase : Optional[Any] =downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
lowercase : List[Any] =downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def lowercase_ ( __A : Dict , __A : Tuple , __A : Tuple , __A : Any ) -> Tuple:
"""simple docstring"""
lowercase : List[str] =torch.load(_a , map_location='''cpu''' )
lowercase : Any =checkpoint["""Downstream"""]
lowercase : Tuple =UniSpeechSatConfig.from_pretrained(_a )
lowercase : str =WavaVecaFeatureExtractor.from_pretrained(
_a , return_attention_mask=_a , do_normalize=_a )
lowercase : Dict =hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
lowercase : Tuple =convert_classification(_a , _a , _a )
elif arch.endswith('''ForAudioFrameClassification''' ):
lowercase : Tuple =convert_diarization(_a , _a , _a )
elif arch.endswith('''ForXVector''' ):
lowercase : Any =convert_xvector(_a , _a , _a )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
lowercase : Optional[Any] =checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(_a )
hf_model.save_pretrained(_a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 94 |
from __future__ import annotations
import numpy as np
def A__ ( _a : np.ndarray ):
'''simple docstring'''
snake_case__ , snake_case__ : str =np.shape(_a )
if rows != columns:
snake_case__ : Any =(
"""'table' has to be of square shaped array but got a """
f"{rows}x{columns} array:\n{table}"
)
raise ValueError(_a )
snake_case__ : Dict =np.zeros((rows, columns) )
snake_case__ : str =np.zeros((rows, columns) )
for i in range(_a ):
for j in range(_a ):
snake_case__ : Optional[int] =sum(lower[i][k] * upper[k][j] for k in range(_a ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
snake_case__ : List[Any] =(table[i][j] - total) / upper[j][j]
snake_case__ : Optional[int] =1
for j in range(_a , _a ):
snake_case__ : int =sum(lower[i][k] * upper[k][j] for k in range(_a ) )
snake_case__ : Dict =table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 385 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCamelCase__( __A ):
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
requires_backends(self ,'vision' )
self.check_model_type(__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,**__UpperCAmelCase ) -> Optional[Any]:
return {}, {}, {}
def snake_case__ ( self ,__UpperCAmelCase ) -> Tuple:
A__ = load_image(__UpperCAmelCase )
A__ = image.size
A__ = self.image_processor(images=__UpperCAmelCase ,return_tensors=self.framework )
return model_inputs
def snake_case__ ( self ,__UpperCAmelCase ) -> Optional[Any]:
A__ = self.model(**__UpperCAmelCase )
return model_outputs
def snake_case__ ( self ,__UpperCAmelCase ) -> int:
A__ = model_outputs.predicted_depth
A__ = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) ,size=self.image_size[::-1] ,mode='bicubic' ,align_corners=__UpperCAmelCase )
A__ = prediction.squeeze().cpu().numpy()
A__ = (output * 2_55 / np.max(__UpperCAmelCase )).astype('uint8' )
A__ = Image.fromarray(__UpperCAmelCase )
A__ = {}
A__ = predicted_depth
A__ = depth
return output_dict
| 536 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( __A ):
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' ,__UpperCAmelCase ,)
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
| 536 | 1 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : Optional[Any] = logging.get_logger(__name__)
# General docstring
_a : Optional[Any] = "RegNetConfig"
# Base docstring
_a : int = "facebook/regnet-y-040"
_a : Tuple = [1, 1_088, 7, 7]
# Image classification docstring
_a : List[str] = "facebook/regnet-y-040"
_a : Any = "tabby, tabby cat"
_a : Tuple = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( nn.Module ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , ) -> List[str]:
super().__init__()
__snake_case = nn.Convad(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , padding=kernel_size // 2 , groups=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ , )
__snake_case = nn.BatchNormad(SCREAMING_SNAKE_CASE_ )
__snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
__snake_case = self.convolution(SCREAMING_SNAKE_CASE_ )
__snake_case = self.normalization(SCREAMING_SNAKE_CASE_ )
__snake_case = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : RegNetConfig ) -> Dict:
super().__init__()
__snake_case = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__snake_case = config.num_channels
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
__snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__snake_case = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 ) -> List[str]:
super().__init__()
__snake_case = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
__snake_case = nn.BatchNormad(SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Tensor:
__snake_case = self.convolution(SCREAMING_SNAKE_CASE_ )
__snake_case = self.normalization(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int:
super().__init__()
__snake_case = nn.AdaptiveAvgPoolad((1, 1) )
__snake_case = nn.Sequential(
nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 ) , nn.Sigmoid() , )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]:
# b c h w -> b c 1 1
__snake_case = self.pooler(SCREAMING_SNAKE_CASE_ )
__snake_case = self.attention(SCREAMING_SNAKE_CASE_ )
__snake_case = hidden_state * attention
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 ) -> Tuple:
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = max(1 , out_channels // config.groups_width )
__snake_case = (
RegNetShortCut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = ACTaFN[config.hidden_act]
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]:
__snake_case = hidden_state
__snake_case = self.layer(SCREAMING_SNAKE_CASE_ )
__snake_case = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
__snake_case = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 ) -> Optional[Any]:
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = max(1 , out_channels // config.groups_width )
__snake_case = (
RegNetShortCut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act ) , RegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = ACTaFN[config.hidden_act]
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = hidden_state
__snake_case = self.layer(SCREAMING_SNAKE_CASE_ )
__snake_case = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
__snake_case = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , ) -> Any:
super().__init__()
__snake_case = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , ) , *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(depth - 1 )] , )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
__snake_case = self.layers(SCREAMING_SNAKE_CASE_ )
return hidden_state
class _lowercase ( nn.Module ):
def __init__( self : int , SCREAMING_SNAKE_CASE_ : RegNetConfig ) -> Union[str, Any]:
super().__init__()
__snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ):
self.stages.append(RegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ ) )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> BaseModelOutputWithNoAttention:
__snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
__snake_case = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = RegNetConfig
_SCREAMING_SNAKE_CASE : Optional[int] = "regnet"
_SCREAMING_SNAKE_CASE : int = "pixel_values"
_SCREAMING_SNAKE_CASE : Optional[int] = True
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(SCREAMING_SNAKE_CASE_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=False ) -> Tuple:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = value
_a : Union[str, Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_a : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _lowercase ( __lowercase ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
super().__init__(SCREAMING_SNAKE_CASE_ )
__snake_case = config
__snake_case = RegNetEmbeddings(SCREAMING_SNAKE_CASE_ )
__snake_case = RegNetEncoder(SCREAMING_SNAKE_CASE_ )
__snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.embedder(SCREAMING_SNAKE_CASE_ )
__snake_case = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
__snake_case = encoder_outputs[0]
__snake_case = self.pooler(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _lowercase ( __lowercase ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Any:
super().__init__(SCREAMING_SNAKE_CASE_ )
__snake_case = config.num_labels
__snake_case = RegNetModel(SCREAMING_SNAKE_CASE_ )
# classification head
__snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.regnet(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
__snake_case = outputs.pooler_output if return_dict else outputs[1]
__snake_case = self.classifier(SCREAMING_SNAKE_CASE_ )
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = 'single_label_classification'
else:
__snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__snake_case = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
| 56 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure) | 25 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = 1
A_ : int = 3
A_ : List[Any] = (32, 32)
A_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_a )
return image
@property
def _a ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = UNetaDConditionModel(
block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=_a ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
def _a ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : str = AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
return CLIPTextModel(_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : List[str] = self.dummy_cond_unet_upscale
A_ : int = DDPMScheduler()
A_ : Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : List[str] = self.dummy_vae
A_ : List[Any] = self.dummy_text_encoder
A_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : List[str] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Dict = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A_ : Optional[int] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : Tuple = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = """A painting of a squirrel eating a burger"""
A_ : Any = torch.Generator(device=_a ).manual_seed(0 )
A_ : Optional[Any] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : List[str] = output.images
A_ : str = torch.Generator(device=_a ).manual_seed(0 )
A_ : Union[str, Any] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=_a ,)[0]
A_ : Union[str, Any] = image[0, -3:, -3:, -1]
A_ : Dict = image_from_tuple[0, -3:, -3:, -1]
A_ : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
A_ : List[str] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.dummy_cond_unet_upscale
A_ : Tuple = DDPMScheduler()
A_ : List[Any] = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : int = self.dummy_vae
A_ : str = self.dummy_text_encoder
A_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : List[str] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A_ : List[str] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : Dict = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : List[Any] = """A painting of a squirrel eating a burger"""
A_ : Union[str, Any] = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : Tuple = output.images
assert image.shape[0] == 2
A_ : Tuple = torch.Generator(device=_a ).manual_seed(0 )
A_ : List[str] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Any = self.dummy_cond_unet_upscale
A_ : Tuple = DDPMScheduler()
A_ : Tuple = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : Optional[Any] = self.dummy_vae
A_ : List[str] = self.dummy_text_encoder
A_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Tuple = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Tuple = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
A_ : Optional[Any] = unet.half()
A_ : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
A_ : List[Any] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : str = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : Optional[int] = """A painting of a squirrel eating a burger"""
A_ : List[Any] = torch.manual_seed(0 )
A_ : str = sd_pipe(
[prompt] ,image=_a ,generator=_a ,num_inference_steps=2 ,output_type="""np""" ,).images
A_ : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
A_ : List[str] = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained(_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
A_ : Tuple = """a cat sitting on a park bench"""
A_ : int = torch.manual_seed(0 )
A_ : int = pipe(
prompt=_a ,image=_a ,generator=_a ,output_type="""np""" ,)
A_ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
A_ : Tuple = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : int = StableDiffusionUpscalePipeline.from_pretrained(
_a ,torch_dtype=torch.floataa ,)
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
A_ : Any = """a cat sitting on a park bench"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : List[str] = pipe(
prompt=_a ,image=_a ,generator=_a ,output_type="""np""" ,)
A_ : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : Any = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : Tuple = StableDiffusionUpscalePipeline.from_pretrained(
_a ,torch_dtype=torch.floataa ,)
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ : Tuple = """a cat sitting on a park bench"""
A_ : Any = torch.manual_seed(0 )
A_ : Optional[Any] = pipe(
prompt=_a ,image=_a ,generator=_a ,num_inference_steps=5 ,output_type="""np""" ,)
A_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 27 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.inta:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lots of space). So we remove that
# left whitespace
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
A_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase)
A_ : Tuple = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Union[str, Any] = "speech_to_text"
_UpperCamelCase : Optional[int] = ["past_key_values"]
_UpperCamelCase : List[Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _lowerCAmelCase=1_0_0_0_0 , _lowerCAmelCase=1_2 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=4 , _lowerCAmelCase=6 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=4 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=2 , _lowerCAmelCase=True , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=6_0_0_0 , _lowerCAmelCase=1_0_2_4 , _lowerCAmelCase=2 , _lowerCAmelCase=(5, 5) , _lowerCAmelCase=1_0_2_4 , _lowerCAmelCase=8_0 , _lowerCAmelCase=1 , **_lowerCAmelCase , ):
_lowercase : Tuple = vocab_size
_lowercase : int = d_model
_lowercase : Tuple = encoder_ffn_dim
_lowercase : Union[str, Any] = encoder_layers
_lowercase : Dict = encoder_attention_heads
_lowercase : int = decoder_ffn_dim
_lowercase : Any = decoder_layers
_lowercase : Optional[int] = decoder_attention_heads
_lowercase : Optional[int] = dropout
_lowercase : Optional[int] = attention_dropout
_lowercase : str = activation_dropout
_lowercase : int = activation_function
_lowercase : Dict = init_std
_lowercase : Any = encoder_layerdrop
_lowercase : Optional[int] = decoder_layerdrop
_lowercase : Tuple = use_cache
_lowercase : int = encoder_layers
_lowercase : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase : Any = max_source_positions
_lowercase : Any = max_target_positions
_lowercase : Any = num_conv_layers
_lowercase : Union[str, Any] = list(_lowerCAmelCase )
_lowercase : List[Any] = conv_channels
_lowercase : int = input_feat_per_channel
_lowercase : Union[str, Any] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
| 66 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = DistilBertTokenizer
_SCREAMING_SNAKE_CASE = DistilBertTokenizerFast
_SCREAMING_SNAKE_CASE = True
@slow
def lowerCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
snake_case_ = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCAmelCase )
snake_case_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCAmelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 283 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_lowercase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_lowercase = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCamelCase__ ( a , a ):
return np.sqrt(np.sum((np.asarray(a ) - np.asarray(a )) ** 2 ) )
def lowerCamelCase__ ( a , a ):
return sum((va - va) ** 2 for va, va in zip(a , a ) ) ** (1 / 2)
if __name__ == "__main__":
def lowerCamelCase__ ( ):
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
| 709 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 427 | 0 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
_lowercase : Dict = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
_lowercase : List[str] = dict(scheduler.config )
_lowercase : Any = 1
_lowercase : List[Any] = FrozenDict(_lowerCAmelCase )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
_lowercase : Any = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
_lowercase : str = dict(scheduler.config )
_lowercase : int = True
_lowercase : Union[str, Any] = FrozenDict(_lowerCAmelCase )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=_lowerCAmelCase , segmentation_processor=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , )
def __a ( self , _lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def __a ( self ):
self.enable_attention_slicing(_lowerCAmelCase )
def __a ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_lowercase : Tuple = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __a ( self ):
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 5_0 , _lowerCAmelCase = 7.5 , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = 1 , **_lowerCAmelCase , ):
_lowercase : str = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
_lowercase : Any = self.segmentation_model(**_lowerCAmelCase )
_lowercase : str = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
_lowercase : Optional[int] = self.numpy_to_pil(_lowerCAmelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
_lowercase : str = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , height=_lowerCAmelCase , width=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , negative_prompt=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , latents=_lowerCAmelCase , output_type=_lowerCAmelCase , return_dict=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=_lowerCAmelCase , )
| 66 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Any ,*lowercase_ : Dict ,**lowercase_ : List[str] ):
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' ,lowercase_ ,)
super().__init__(*lowercase_ ,**lowercase_ )
| 450 | 0 |
from __future__ import annotations
from collections import namedtuple
def lowerCAmelCase( a__ : str , a__ : List[str] , a__ : str ):
'''simple docstring'''
lowerCamelCase__ = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
'''simple docstring'''
lowerCAmelCase_ = "Alexander Joslin"
import operator as op
from .stack import Stack
def lowerCAmelCase( a__ : str ):
'''simple docstring'''
lowerCamelCase__ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
lowerCamelCase__ = Stack()
lowerCamelCase__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__ ) )
elif i in operators:
# RULE 2
operator_stack.push(a__ )
elif i == ")":
# RULE 4
lowerCamelCase__ = operator_stack.peek()
operator_stack.pop()
lowerCamelCase__ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase__ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase__ = operators[opr](a__ , a__ )
operand_stack.push(a__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCAmelCase_ = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 426 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = ProphetNetTokenizer
A__ : Optional[int] = False
def _a ( self : List[Any] ):
"""simple docstring"""
super().setUp()
A__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _a ( self : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
A__ = 'UNwant\u00E9d,running'
A__ = 'unwanted, running'
return input_text, output_text
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file )
A__ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
A__ = {}
for i, token in enumerate(A_ ):
A__ = i
A__ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
A__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
A__ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
A__ = tokenizer(A_ , padding=A_ , return_tensors='pt' )
self.assertIsInstance(A_ , A_ )
A__ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(A_ , A_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def _a ( self : int ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self : int ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=A_ )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ )
A__ = tokenizer.build_inputs_with_special_tokens(A_ )
A__ = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 9 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : List[str] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : str , lowercase : Any=8 ):
'''simple docstring'''
lowerCamelCase_ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCamelCase_ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , A_ : UNetaDConditionModel , A_ : DDPMScheduler , A_ : VQModel , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=A_ , scheduler=A_ , movq=A_ , )
lowerCamelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self : List[Any] , A_ : Tuple , A_ : Dict , A_ : List[Any] , A_ : int , A_ : Any , A_ : Tuple ) -> Any:
"""simple docstring"""
if latents is None:
lowerCamelCase_ = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase_ = latents.to(A_ )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def a__ ( self : int , A_ : str=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
lowerCamelCase_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def a__ ( self : Tuple , A_ : Union[str, Any]=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_ = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
lowerCamelCase_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self : List[Any] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : int = 512 , A_ : int = 512 , A_ : int = 100 , A_ : float = 4.0 , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = guidance_scale > 1.0
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
lowerCamelCase_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = negative_image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.unet.config.in_channels
lowerCamelCase_ , lowerCamelCase_ = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = {'image_embeds': image_embeds}
lowerCamelCase_ = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ , lowerCamelCase_ = variance_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
lowerCamelCase_ = self.movq.decode(A_ , force_not_quantize=A_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase_ = image * 0.5 + 0.5
lowerCamelCase_ = image.clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 70 | 0 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a_ ( __A , __A ):
"""simple docstring"""
@register_to_config
def __init__( self , *,
_lowerCamelCase = 4 , _lowerCamelCase = 768 , _lowerCamelCase , _lowerCamelCase , ) ->Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.zeros(UpperCamelCase__ ) )
# parameters for additional clip time embeddings
SCREAMING_SNAKE_CASE : Any = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
# parameters for encoder hidden states
SCREAMING_SNAKE_CASE : str = clip_extra_context_tokens
SCREAMING_SNAKE_CASE : str = nn.Linear(
UpperCamelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
SCREAMING_SNAKE_CASE : Tuple = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.LayerNorm(UpperCamelCase__ )
def __lowerCAmelCase ( self , *, _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = image_embeddings.shape[0]
SCREAMING_SNAKE_CASE : List[str] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_free_guidance_embeddings.expand(
UpperCamelCase__ , -1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
SCREAMING_SNAKE_CASE : List[Any] = self.embedding_proj(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
SCREAMING_SNAKE_CASE : Optional[Any] = self.clip_extra_context_tokens_proj(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = clip_extra_context_tokens.reshape(UpperCamelCase__ , -1 , self.clip_extra_context_tokens )
SCREAMING_SNAKE_CASE : Any = clip_extra_context_tokens.permute(0 , 2 , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.encoder_hidden_states_proj(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_encoder_hidden_states_norm(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 709 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[2, 2, 3, 2] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=["stage2", "stage3", "stage4"] , _lowerCamelCase=3 , _lowerCamelCase=None , ) ->Dict:
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_stages
SCREAMING_SNAKE_CASE : str = hidden_sizes
SCREAMING_SNAKE_CASE : List[str] = depths
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = out_features
SCREAMING_SNAKE_CASE : List[Any] = num_labels
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : str = num_stages
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->Dict:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __lowerCAmelCase ( self ) ->List[Any]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = UperNetForSemanticSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : int = UperNetModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Optional[int]:
return
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = _config_zero_init(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
SCREAMING_SNAKE_CASE : int = Image.open(a__ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
SCREAMING_SNAKE_CASE : Tuple = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : int = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Any = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
| 333 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''', _a, )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : List[Any] = RobertaConfig
lowerCamelCase_ : Union[str, Any] = '''roberta'''
def __init__(self , __magic_name__ ) -> List[str]:
'''simple docstring'''
super().__init__(__magic_name__ )
snake_case_ : Optional[Any] = RobertaEmbeddings(__magic_name__ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''', _a, )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = RobertaConfig
lowerCamelCase_ : Optional[Any] = '''roberta'''
def __init__(self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(__magic_name__ )
snake_case_ : Any = config.num_labels
snake_case_ : int = config.num_hidden_layers
snake_case_ : List[Any] = DeeRobertaModel(__magic_name__ )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Union[str, Any] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__magic_name__ )
def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=-1 , __magic_name__=False , ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.num_layers
try:
snake_case_ : Any = self.roberta(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , )
snake_case_ : Optional[int] = outputs[1]
snake_case_ : Any = self.dropout(__magic_name__ )
snake_case_ : Any = self.classifier(__magic_name__ )
snake_case_ : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Tuple = e.message
snake_case_ : int = e.exit_layer
snake_case_ : List[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(__magic_name__ )
snake_case_ : Dict = []
snake_case_ : List[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Union[str, Any] = MSELoss()
snake_case_ : List[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : int = CrossEntropyLoss()
snake_case_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : int = highway_exit[0]
if not self.training:
highway_logits_all.append(__magic_name__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[Any] = CrossEntropyLoss()
snake_case_ : Dict = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__magic_name__ )
if train_highway:
snake_case_ : Any = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : int = (loss,) + outputs
if not self.training:
snake_case_ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : List[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 60 |
'''simple docstring'''
from __future__ import annotations
def __A ( a_ : float ,a_ : float ,a_ : float ,):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 525 | 0 |
def _a ( lowerCamelCase__ = 1_00 ) -> int:
lowerCamelCase_ : List[str] = set()
lowerCamelCase_ : Tuple = 0
lowerCamelCase_ : Any = n + 1 # maximum limit
for a in range(2 , __UpperCamelCase ):
for b in range(2 , __UpperCamelCase ):
lowerCamelCase_ : List[str] = a**b # calculates the current power
collect_powers.add(__UpperCamelCase ) # adds the result to the set
return len(__UpperCamelCase )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 713 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__(self : List[Any] , **_snake_case : int ) -> Dict:
"""simple docstring"""
super().__init__(**_snake_case )
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , 'vision' )
self.check_model_type(_snake_case )
def __call__(self : int , _snake_case : Union[str, "Image.Image", List[Dict[str, Any]]] , _snake_case : Union[str, List[str]] = None , **_snake_case : Optional[int] , ) -> Dict:
"""simple docstring"""
if "text_queries" in kwargs:
lowerCamelCase_ : Tuple = kwargs.pop('text_queries' )
if isinstance(_snake_case , (str, Image.Image) ):
lowerCamelCase_ : Union[str, Any] = {'image': image, 'candidate_labels': candidate_labels}
else:
lowerCamelCase_ : List[str] = image
lowerCamelCase_ : List[Any] = super().__call__(_snake_case , **_snake_case )
return results
def UpperCAmelCase_ (self : Union[str, Any] , **_snake_case : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : Dict = {}
if "threshold" in kwargs:
lowerCamelCase_ : int = kwargs['threshold']
if "top_k" in kwargs:
lowerCamelCase_ : Any = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCAmelCase_ (self : Tuple , _snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ : List[str] = load_image(inputs['image'] )
lowerCamelCase_ : Any = inputs['candidate_labels']
if isinstance(_snake_case , _snake_case ):
lowerCamelCase_ : Any = candidate_labels.split(',' )
lowerCamelCase_ : List[Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(_snake_case ):
lowerCamelCase_ : str = self.tokenizer(_snake_case , return_tensors=self.framework )
lowerCamelCase_ : Union[str, Any] = self.image_processor(_snake_case , return_tensors=self.framework )
yield {
"is_last": i == len(_snake_case ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCAmelCase_ (self : List[Any] , _snake_case : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : List[Any] = model_inputs.pop('target_size' )
lowerCamelCase_ : int = model_inputs.pop('candidate_label' )
lowerCamelCase_ : Tuple = model_inputs.pop('is_last' )
lowerCamelCase_ : Optional[Any] = self.model(**_snake_case )
lowerCamelCase_ : Optional[Any] = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCAmelCase_ (self : Optional[int] , _snake_case : Optional[Any] , _snake_case : Any=0.1 , _snake_case : Union[str, Any]=None ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Dict = []
for model_output in model_outputs:
lowerCamelCase_ : str = model_output['candidate_label']
lowerCamelCase_ : Optional[int] = BaseModelOutput(_snake_case )
lowerCamelCase_ : List[str] = self.image_processor.post_process_object_detection(
outputs=_snake_case , threshold=_snake_case , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
lowerCamelCase_ : Tuple = outputs['scores'][index].item()
lowerCamelCase_ : List[Any] = self._get_bounding_box(outputs['boxes'][index][0] )
lowerCamelCase_ : Dict = {'score': score, 'label': label, 'box': box}
results.append(_snake_case )
lowerCamelCase_ : int = sorted(_snake_case , key=lambda _snake_case : x["score"] , reverse=_snake_case )
if top_k:
lowerCamelCase_ : List[Any] = results[:top_k]
return results
def UpperCAmelCase_ (self : List[Any] , _snake_case : "torch.Tensor" ) -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[Any] = box.int().tolist()
lowerCamelCase_ : Tuple = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 144 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class __magic_name__ ( snake_case_ ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
self.test()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Tuple = 0
__A : Tuple = False
while not completed:
if counter == 1:
self.reset()
__A : Any = self.advance()
if not self.does_advance(lowerCamelCase ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
__A : int = self.update(lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def lowerCAmelCase__ ( self ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def lowerCAmelCase__ ( self , lowerCamelCase=False ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class __magic_name__ ( snake_case_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase ):
'''simple docstring'''
super(lowerCamelCase , self ).__init__()
if not isinstance(lowerCamelCase , lowerCamelCase ) or len(lowerCamelCase ) == 0:
raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(lowerCamelCase , lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
__A : List[Any] = token_ids
__A : Any = len(self.token_ids )
__A : Any = -1 # the index of the currently fulfilled step
__A : Optional[int] = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(lowerCamelCase )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(lowerCamelCase )}" )
__A : int = False
__A : Any = False
__A : List[Any] = False
if self.does_advance(lowerCamelCase ):
self.fulfilled_idx += 1
__A : Dict = True
if self.fulfilled_idx == (self.seqlen - 1):
__A : Optional[int] = True
__A : Optional[Any] = completed
else:
# failed to make progress.
__A : int = True
self.reset()
return stepped, completed, reset
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Tuple = False
__A : str = 0
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCAmelCase__ ( self , lowerCamelCase=False ):
'''simple docstring'''
__A : Any = PhrasalConstraint(self.token_ids )
if stateful:
__A : Dict = self.seqlen
__A : List[str] = self.fulfilled_idx
__A : Union[str, Any] = self.completed
return new_constraint
class __magic_name__ :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=True ):
'''simple docstring'''
__A : List[str] = max([len(lowerCamelCase ) for one in nested_token_ids] )
__A : Optional[int] = {}
for token_ids in nested_token_ids:
__A : Tuple = root
for tidx, token_id in enumerate(lowerCamelCase ):
if token_id not in level:
__A : Union[str, Any] = {}
__A : Union[str, Any] = level[token_id]
if no_subsets and self.has_subsets(lowerCamelCase , lowerCamelCase ):
raise ValueError(
"Each list in `nested_token_ids` can\'t be a complete subset of another list, but is"
f" {nested_token_ids}." )
__A : Optional[int] = root
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = self.trie
for current_token in current_seq:
__A : str = start[current_token]
__A : List[Any] = list(start.keys() )
return next_tokens
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Dict = self.next_tokens(lowerCamelCase )
return len(lowerCamelCase ) == 0
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : int = list(root.values() )
if len(lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(lowerCamelCase ) for nn in next_nodes] )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__A : str = self.count_leaves(lowerCamelCase )
return len(lowerCamelCase ) != leaf_count
class __magic_name__ ( snake_case_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase ):
'''simple docstring'''
super(lowerCamelCase , self ).__init__()
if not isinstance(lowerCamelCase , lowerCamelCase ) or len(lowerCamelCase ) == 0:
raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(lowerCamelCase , lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(lowerCamelCase , lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
__A : str = DisjunctiveTrie(lowerCamelCase )
__A : Tuple = nested_token_ids
__A : Dict = self.trie.max_height
__A : List[Any] = []
__A : List[Any] = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Any = self.trie.next_tokens(self.current_seq )
if len(lowerCamelCase ) == 0:
return None
else:
return token_list
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCamelCase )}" )
__A : Union[str, Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCamelCase )}" )
__A : str = False
__A : int = False
__A : Tuple = False
if self.does_advance(lowerCamelCase ):
self.current_seq.append(lowerCamelCase )
__A : Tuple = True
else:
__A : List[str] = True
self.reset()
__A : Dict = self.trie.reached_leaf(self.current_seq )
__A : Union[str, Any] = completed
return stepped, completed, reset
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Any = False
__A : List[str] = []
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCAmelCase__ ( self , lowerCamelCase=False ):
'''simple docstring'''
__A : Optional[int] = DisjunctiveConstraint(self.token_ids )
if stateful:
__A : Any = self.seqlen
__A : Tuple = self.current_seq
__A : Optional[int] = self.completed
return new_constraint
class __magic_name__ :
"""simple docstring"""
def __init__( self , lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = constraints
# max # of steps required to fulfill a given constraint
__A : str = max([c.seqlen for c in constraints] )
__A : str = len(lowerCamelCase )
__A : Optional[Any] = False
self.init_state()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[str] = []
__A : str = None
__A : int = [constraint.copy(stateful=lowerCamelCase ) for constraint in self.constraints]
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[int] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[str] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__A : int = constraint.advance()
if isinstance(lowerCamelCase , lowerCamelCase ):
token_list.append(lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
token_list.extend(lowerCamelCase )
else:
__A : List[Any] = self.inprogress_constraint.advance()
if isinstance(lowerCamelCase , lowerCamelCase ):
token_list.append(lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
token_list.extend(lowerCamelCase )
if len(lowerCamelCase ) == 0:
return None
else:
return token_list
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__A : Tuple = self.add(lowerCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`." )
__A : Optional[int] = False, False
if self.completed:
__A : str = True
__A : int = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
__A : Any = self.inprogress_constraint.update(lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowerCamelCase ) )
__A : List[Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__A : str = None
if len(self.pending_constraints ) == 0:
# we're done!
__A : Any = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(lowerCamelCase ):
__A : Tuple = pending_constraint.update(lowerCamelCase )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(lowerCamelCase )
__A : Tuple = None
if not complete and stepped:
__A : List[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__A : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__A : Dict = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCAmelCase__ ( self , lowerCamelCase=True ):
'''simple docstring'''
__A : Dict = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
__A : Dict = [
constraint.copy(stateful=lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__A : List[str] = self.inprogress_constraint.copy(stateful=lowerCamelCase )
__A : List[str] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 111 | import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowercase ( lowerCamelCase : Optional[Any] ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Optional[int] , snake_case : nn.Module , snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase_ : str = module
UpperCamelCase_ : Optional[int] = nn.Sequential(
nn.Linear(module.in_features , snake_case , bias=snake_case ) , nn.Linear(snake_case , module.out_features , bias=snake_case ) , )
UpperCamelCase_ : Optional[int] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=snake_case )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Dict , *snake_case : Any , **snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.module(snake_case , *snake_case , **snake_case ) + self.adapter(snake_case )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowercase = 'bigscience/bloom-1b7'
# Constant values
lowercase = 2.109_6595_5269_2574
lowercase = 'Hello my name is'
lowercase = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
lowercase = 1_0
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
super().setUp()
# Models and tokenizer
UpperCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
UpperCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = self.model_abit.config
self.assertTrue(hasattr(snake_case , 'quantization_config' ) )
UpperCamelCase_ : str = config.to_dict()
UpperCamelCase_ : List[Any] = config.to_diff_dict()
UpperCamelCase_ : Any = config.to_json_string()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
UpperCamelCase_ : Union[str, Any] = self.model_fpaa.get_memory_footprint()
UpperCamelCase_ : Optional[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
UpperCamelCase_ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(snake_case , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
UpperCamelCase_ : Tuple = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = BitsAndBytesConfig()
UpperCamelCase_ : List[Any] = True
UpperCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case , device_map='auto' )
UpperCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='pt' )
UpperCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaises(snake_case ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = BitsAndBytesConfig()
with self.assertRaises(snake_case ):
UpperCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case , load_in_abit=snake_case , device_map='auto' , bnb_abit_quant_type='nf4' , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(snake_case ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(snake_case ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(snake_case ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(snake_case ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(snake_case ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
UpperCamelCase_ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' )
UpperCamelCase_ : Dict = self.model_fpaa.to(torch.floataa )
UpperCamelCase_ : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
UpperCamelCase_ : Union[str, Any] = self.model_fpaa.to('cpu' )
# Check this does not throw an error
UpperCamelCase_ : str = self.model_fpaa.half()
# Check this does not throw an error
UpperCamelCase_ : Optional[int] = self.model_fpaa.float()
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=snake_case , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] ) -> str:
"""simple docstring"""
UpperCamelCase_ : str = 't5-small'
UpperCamelCase_ : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
UpperCamelCase_ : Tuple = AutoTokenizer.from_pretrained(cls.model_name )
UpperCamelCase_ : Tuple = 'Translate in German: Hello, my dog is cute'
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
from transformers import TaForConditionalGeneration
UpperCamelCase_ : List[str] = TaForConditionalGeneration._keep_in_fpaa_modules
UpperCamelCase_ : Optional[Any] = None
# test with `t5-small`
UpperCamelCase_ : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
UpperCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
UpperCamelCase_ : str = model.generate(**snake_case )
# test with `flan-t5-small`
UpperCamelCase_ : Any = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=snake_case , device_map='auto' )
UpperCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
UpperCamelCase_ : Dict = model.generate(**snake_case )
UpperCamelCase_ : int = modules
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
UpperCamelCase_ : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
UpperCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
UpperCamelCase_ : Any = model.generate(**snake_case )
# test with `flan-t5-small`
UpperCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=snake_case , device_map='auto' )
UpperCamelCase_ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
UpperCamelCase_ : Optional[Any] = model.generate(**snake_case )
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().setUp()
# model_name
UpperCamelCase_ : Union[str, Any] = 'bigscience/bloom-560m'
UpperCamelCase_ : List[Any] = 't5-small'
# Different types of model
UpperCamelCase_ : Any = AutoModel.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
# Sequence classification model
UpperCamelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=snake_case , device_map='auto' )
# CausalLM model
UpperCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case , device_map='auto' )
# Seq2seq model
UpperCamelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=snake_case , device_map='auto' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
UpperCamelCase_ : Dict = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
"""simple docstring"""
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=snake_case , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
UpperCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
UpperCamelCase_ : List[str] = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=snake_case ) , self.EXPECTED_OUTPUTS )
class _lowercase ( snake_case_ ):
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = 'facebook/opt-350m'
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
UpperCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
UpperCamelCase_ : str = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
UpperCamelCase_ : Dict = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(snake_case ) ):
UpperCamelCase_ : Any = LoRALayer(module.q_proj , rank=1_6 )
UpperCamelCase_ : Union[str, Any] = LoRALayer(module.k_proj , rank=1_6 )
UpperCamelCase_ : Optional[int] = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
UpperCamelCase_ : Tuple = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
UpperCamelCase_ : Optional[Any] = model.forward(**snake_case )
out.logits.norm().backward()
for module in model.modules():
if isinstance(snake_case , snake_case ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(snake_case , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( snake_case_ ):
lowercase = 'gpt2-xl'
lowercase = 3.3191_8548_5415_2187
| 417 | 0 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> int:
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 718 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE_ = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 370 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class a :
def __init__( self , UpperCamelCase_ = 6 ):
UpperCAmelCase__ : Node | None = None
UpperCAmelCase__ : Node | None = None
self.create_linked_list(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = Node()
UpperCAmelCase__ : Union[str, Any] = current_node
UpperCAmelCase__ : Any = current_node
UpperCAmelCase__ : Dict = current_node
for _ in range(1 , UpperCamelCase_ ):
UpperCAmelCase__ : Any = Node()
UpperCAmelCase__ : List[Any] = current_node
UpperCAmelCase__ : List[str] = previous_node
UpperCAmelCase__ : Any = current_node
UpperCAmelCase__ : Optional[int] = self.front
UpperCAmelCase__ : List[str] = previous_node
def __snake_case ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __snake_case ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def __snake_case ( self , UpperCamelCase_ ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase__ : List[Any] = self.rear.next
if self.rear:
UpperCAmelCase__ : int = data
def __snake_case ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase__ : Tuple = self.front.data
UpperCAmelCase__ : Optional[int] = None
return data
UpperCAmelCase__ : Optional[int] = self.front
UpperCAmelCase__ : Optional[int] = old_front.next
UpperCAmelCase__ : List[str] = old_front.data
UpperCAmelCase__ : str = None
return data
def __snake_case ( self ):
if self.is_empty():
raise Exception('Empty Queue' )
def __snake_case ( self ):
if self.rear and self.rear.next == self.front:
raise Exception('Full Queue' )
class a :
def __init__( self ):
UpperCAmelCase__ : Any | None = None
UpperCAmelCase__ : Node | None = None
UpperCAmelCase__ : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 110 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 110 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = '▁'
snake_case__ = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
snake_case__ = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
snake_case__ = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
snake_case__ = {
'ernie-m-base': 514,
'ernie-m-large': 514,
}
snake_case__ = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ["input_ids"]
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_INIT_CONFIGURATION
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = RESOURCE_FILES_NAMES
def __init__( self , A_ , A_=None , A_=False , A_="utf8" , A_="[UNK]" , A_="[SEP]" , A_="[PAD]" , A_="[CLS]" , A_="[MASK]" , A_ = None , **A_ , ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , vocab_file=A_ , encoding=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
_lowerCamelCase = do_lower_case
_lowerCamelCase = sentencepiece_model_ckpt
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
_lowerCamelCase = self.load_vocab(filepath=A_ )
else:
_lowerCamelCase = {self.sp_model.id_to_piece(A_ ): id for id in range(self.sp_model.get_piece_size() )}
_lowerCamelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
if text is None:
return None
_lowerCamelCase = self.tokenize(A_ )
_lowerCamelCase , _lowerCamelCase = '''''', []
for i, ch in enumerate(A_ ):
if ch in self.SP_CHAR_MAPPING:
_lowerCamelCase = self.SP_CHAR_MAPPING.get(A_ )
else:
_lowerCamelCase = unicodedata.normalize('''NFKC''' , A_ )
if self.is_whitespace(A_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(A_ ) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = normalized_text, [], 0
if self.do_lower_case:
_lowerCamelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
_lowerCamelCase = token[1:]
_lowerCamelCase = text[offset:].index(A_ ) + offset
_lowerCamelCase = start + len(A_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
_lowerCamelCase = end
return token_mapping
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(A_ , A_ ) for c in text) )
def UpperCamelCase_ ( self , A_ , A_=False , A_=64 , A_=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
_lowerCamelCase = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
_lowerCamelCase = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
_lowerCamelCase = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
_lowerCamelCase = self.sp_model.EncodeAsPieces(A_ )
else:
_lowerCamelCase = self.sp_model.SampleEncodeAsPieces(A_ , A_ , A_ )
_lowerCamelCase = []
for pi, piece in enumerate(A_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(A_ ) and pi != 0:
new_pieces.append(A_ )
continue
else:
continue
_lowerCamelCase = 0
for i, chunk in enumerate(A_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(A_ ) or self.is_punct(A_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(A_ )
_lowerCamelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_lowerCamelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_lowerCamelCase = i
if len(A_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = ''''''.join(A_ ).replace(A_ , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.convert_ids_to_tokens(A_ )
_lowerCamelCase = ''''''.join(A_ ).replace(A_ , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(A_ , self.unk_token )
def UpperCamelCase_ ( self , A_ , A_=None ) -> List[str]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase_ ( self , A_ , A_=None ) -> List[Any]:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase_ ( self , A_ , A_=None , A_=False ) -> List[str]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1]
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(A_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(A_ ) + 1) + [1] * (len(A_ ) + 3)
def UpperCamelCase_ ( self , A_ ) -> Dict:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase_ ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(A_ ) == 1:
_lowerCamelCase = unicodedata.category(A_ )
if cat == "Zs":
return True
return False
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = {}
with io.open(A_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(A_ ):
_lowerCamelCase = line.rstrip('''\n''' )
_lowerCamelCase = int(A_ )
return token_to_idx
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
_lowerCamelCase = 0
if os.path.isdir(A_ ):
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
_lowerCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(A_ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(token + '''\n''' )
index += 1
_lowerCamelCase = os.path.join(A_ , '''sentencepiece.bpe.model''' )
with open(A_ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (vocab_file,) | 638 | from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ ) | 638 | 1 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class __SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = False ) ->Optional[Any]:
'''simple docstring'''
__a = scheduler
__a = optimizers if isinstance(lowerCamelCase , (list, tuple) ) else [optimizers]
__a = split_batches
__a = step_with_optimizer
__a = GradientState()
def __UpperCamelCase ( self , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowerCamelCase , **lowerCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowerCamelCase , **lowerCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__a = AcceleratorState().num_processes
for _ in range(lowerCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowerCamelCase , **lowerCamelCase )
else:
self.scheduler.step(*lowerCamelCase , **lowerCamelCase )
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
return self.scheduler.get_last_lr()
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
return self.scheduler.state_dict()
def __UpperCamelCase ( self , lowerCamelCase ) ->int:
'''simple docstring'''
self.scheduler.load_state_dict(lowerCamelCase )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
return self.scheduler.get_lr()
def __UpperCamelCase ( self , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
return self.scheduler.print_lr(*lowerCamelCase , **lowerCamelCase ) | 448 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowercase : List[str] = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,"""schedulers/""" ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(snake_case ,"""src/diffusers/schedulers/scheduling_ddpm.py""" ) ,os.path.join(self.diffusers_dir ,"""schedulers/scheduling_ddpm.py""" ) ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case=None ):
'''simple docstring'''
lowercase : Optional[int] = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowercase : Optional[Any] = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowercase : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 )
lowercase : int = black.format_str(snake_case ,mode=snake_case )
lowercase : int = os.path.join(self.diffusers_dir ,"""new_code.py""" )
with open(snake_case ,"""w""" ,newline="""\n""" ) as f:
f.write(snake_case )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=snake_case )
with open(snake_case ,"""r""" ) as f:
self.assertTrue(f.read() ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,REFERENCE_CODE + """\n""" ,)
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,snake_case ,)
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,re.sub("""DDPM""" ,"""Test""" ,snake_case ) ,)
# Copy consistency with a really long name
lowercase : Dict = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" ,f"{long_class_name}SchedulerOutput" ,re.sub("""Bert""" ,snake_case ,snake_case ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,snake_case ,overwrite_result=re.sub("""DDPM""" ,"""Test""" ,snake_case ) ,)
| 336 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def snake_case (UpperCamelCase : Dict[str, torch.Tensor] ):
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for rt in rc.restypes:
lowerCamelCase__ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowerCamelCase__ = {name: i for i, name in enumerate(UpperCamelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowerCamelCase__ = torch.tensor(
UpperCamelCase , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowerCamelCase__ = torch.tensor(
UpperCamelCase , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowerCamelCase__ = torch.tensor(
UpperCamelCase , dtype=torch.floataa , device=protein["""aatype"""].device , )
lowerCamelCase__ = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowerCamelCase__ = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase__ = restype_atomaa_mask[protein_aatype]
lowerCamelCase__ = residx_atomaa_mask
lowerCamelCase__ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowerCamelCase__ = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase__ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowerCamelCase__ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowerCamelCase__ = rc.restype_atoa[restype_letter]
lowerCamelCase__ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowerCamelCase__ = rc.atom_order[atom_name]
lowerCamelCase__ = 1
lowerCamelCase__ = restype_atomaa_mask[protein_aatype]
lowerCamelCase__ = residx_atomaa_mask
return protein
def snake_case (UpperCamelCase : Dict[str, torch.Tensor] ):
'''simple docstring'''
lowerCamelCase__ = tree_map(lambda UpperCamelCase : torch.tensor(UpperCamelCase , device=batch["""aatype"""].device ) , UpperCamelCase , np.ndarray )
lowerCamelCase__ = tensor_tree_map(lambda UpperCamelCase : np.array(UpperCamelCase ) , make_atomaa_masks(UpperCamelCase ) )
return out
| 235 |
def snake_case (UpperCamelCase : int ):
'''simple docstring'''
return str(UpperCamelCase ) == str(UpperCamelCase )[::-1]
def snake_case (UpperCamelCase : int ):
'''simple docstring'''
return int(UpperCamelCase ) + int(str(UpperCamelCase )[::-1] )
def snake_case (UpperCamelCase : int = 10000 ):
'''simple docstring'''
lowerCamelCase__ = []
for num in range(1 , UpperCamelCase ):
lowerCamelCase__ = 0
lowerCamelCase__ = num
while iterations < 50:
lowerCamelCase__ = sum_reverse(UpperCamelCase )
iterations += 1
if is_palindrome(UpperCamelCase ):
break
else:
lychrel_nums.append(UpperCamelCase )
return len(UpperCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 235 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ : List[Any] = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[Any] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 435 | '''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = CodeGenTokenizer
UpperCAmelCase__ = CodeGenTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = {'''add_prefix_space''': True}
UpperCAmelCase__ = False
def snake_case__ ( self : str ) ->str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Optional[int] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
_UpperCamelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : Union[str, Any] = {"unk_token": "<unk>"}
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase__ ) )
def snake_case__ ( self : Union[str, Any] , **lowercase__ : int ) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def snake_case__ ( self : int , **lowercase__ : List[Any] ) ->Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def snake_case__ ( self : str , lowercase__ : Dict ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : int = "lower newer"
_UpperCamelCase : int = "lower newer"
return input_text, output_text
def snake_case__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Dict = "lower newer"
_UpperCamelCase : Any = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : str = tokenizer.tokenize(lowercase__ , add_prefix_space=lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def snake_case__ ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Union[str, Any] = self.get_tokenizer()
_UpperCamelCase : Tuple = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
_UpperCamelCase : Any = "lower newer"
# Testing tokenization
_UpperCamelCase : Optional[int] = tokenizer.tokenize(lowercase__ , add_prefix_space=lowercase__ )
_UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
_UpperCamelCase : Dict = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing conversion to ids with special tokens
_UpperCamelCase : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
_UpperCamelCase : str = tokenizer.encode(lowercase__ , add_prefix_space=lowercase__ )
_UpperCamelCase : Optional[int] = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing the unknown token
_UpperCamelCase : Optional[Any] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def snake_case__ ( self : Any , *lowercase__ : Union[str, Any] , **lowercase__ : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : List[Any] , lowercase__ : str=15 ) ->Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
# Simple input
_UpperCamelCase : str = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Tuple = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding="max_length" )
# Simple input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding="max_length" )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding="max_length" , )
def snake_case__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCamelCase : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Dict = "This is a simple input"
_UpperCamelCase : Any = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : Union[str, Any] = ("This is a simple input", "This is a pair")
_UpperCamelCase : Union[str, Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Tuple = tokenizer.pad_token_id
_UpperCamelCase : List[Any] = tokenizer(lowercase__ , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Optional[Any] = tokenizer(lowercase__ , padding=lowercase__ , truncate=lowercase__ , return_tensors="np" )
_UpperCamelCase : Union[str, Any] = tokenizer(*lowercase__ , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : List[Any] = tokenizer(lowercase__ , padding=lowercase__ , truncate=lowercase__ , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case__ ( self : Tuple ) ->int:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = "$$$"
_UpperCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase__ , add_bos_token=lowercase__ )
_UpperCamelCase : List[Any] = "This is a simple input"
_UpperCamelCase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Optional[Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(lowercase__ )
_UpperCamelCase : Any = tokenizer(lowercase__ )
self.assertEqual(out_s.input_ids[0] , lowercase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : int = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : List[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowercase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def snake_case__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_UpperCamelCase : Any = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_UpperCamelCase : Optional[int] = "\nif len_a > len_b: result = a\nelse: result = b"
_UpperCamelCase : str = tokenizer.encode(lowercase__ )
_UpperCamelCase : List[Any] = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_UpperCamelCase : Optional[int] = tokenizer.decode(lowercase__ , truncate_before_pattern=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def snake_case__ ( self : int ) ->str:
'''simple docstring'''
pass
| 435 | 1 |
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase__ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def __lowercase ( _UpperCAmelCase ) -> str:
'''simple docstring'''
re.sub("<n>" , "" , _UpperCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_UpperCAmelCase ) )
| 576 | from functools import reduce
lowerCAmelCase__ = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def __lowercase ( _UpperCAmelCase = N ) -> int:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _UpperCAmelCase , _UpperCAmelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(_UpperCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 576 | 1 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__A = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :str
__magic_name__ :Optional[str] = None
__magic_name__ :Optional[Union[str, int]] = None
__magic_name__ :Optional[Union[str, int]] = None
__magic_name__ :Optional[Union[str, int]] = None
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = _str_to_version_tuple(self.version_str )
def __repr__( self ):
'''simple docstring'''
return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def snake_case ( self ):
'''simple docstring'''
return self.major, self.minor, self.patch
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return Version(__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return other
raise TypeError(F"{other} (type {type(__UpperCAmelCase )}) cannot be compared to version." )
def __eq__( self , __UpperCAmelCase ):
'''simple docstring'''
try:
lowerCAmelCase__ :Optional[int] = self._validate_operand(__UpperCAmelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self._validate_operand(__UpperCAmelCase )
return self.tuple < other.tuple
def __hash__( self ):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def snake_case ( cls , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def snake_case ( self ):
'''simple docstring'''
return self.version_str
def __A (_SCREAMING_SNAKE_CASE ) ->Dict:
"""simple docstring"""
lowerCAmelCase__ :Any = _VERSION_REG.match(_SCREAMING_SNAKE_CASE )
if not res:
raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(_SCREAMING_SNAKE_CASE ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
return ".".join(str(_SCREAMING_SNAKE_CASE ) for v in version_tuple )
| 93 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:List[Any] = 'gptsan-japanese'
SCREAMING_SNAKE_CASE:str = [
'past_key_values',
]
SCREAMING_SNAKE_CASE:int = {
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , _a=3_6000 , _a=1280 , _a=1024 , _a=8192 , _a=4096 , _a=128 , _a=10 , _a=0 , _a=16 , _a=16 , _a=128 , _a=0.0 , _a=1e-5 , _a=False , _a=0.0 , _a="float32" , _a=False , _a=False , _a=False , _a=0.002 , _a=False , _a=True , _a=3_5998 , _a=3_5995 , _a=3_5999 , **_a , ):
"""simple docstring"""
a__ = vocab_size
a__ = max_position_embeddings
a__ = d_model
a__ = d_ff
a__ = d_ext
a__ = d_spout
a__ = num_switch_layers
a__ = num_ext_layers
a__ = num_switch_layers + num_ext_layers
a__ = num_heads
a__ = num_experts
a__ = expert_capacity
a__ = dropout_rate
a__ = layer_norm_epsilon
a__ = router_bias
a__ = router_jitter_noise
a__ = router_dtype
a__ = router_ignore_padding_tokens
a__ = output_hidden_states
a__ = output_attentions
a__ = initializer_factor
a__ = output_router_logits
a__ = use_cache
super().__init__(
separator_token_id=_a , pad_token_id=_a , eos_token_id=_a , **_a , )
| 394 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCamelCase (unittest.TestCase ):
lowercase__ = MODEL_FOR_CAUSAL_LM_MAPPING
lowercase__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __lowerCamelCase ( self ):
__snake_case = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
__snake_case = text_generator('This is a test' , do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
__snake_case = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
__snake_case = text_generator('This is a test' , do_sample=SCREAMING_SNAKE_CASE_ , num_return_sequences=2 , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
] , )
__snake_case = text_generator.model.config.eos_token_id
__snake_case = '<pad>'
__snake_case = text_generator(
['This is a test', 'This is a second test'] , do_sample=SCREAMING_SNAKE_CASE_ , num_return_sequences=2 , batch_size=2 , return_tensors=SCREAMING_SNAKE_CASE_ , )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
[
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
],
[
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
{'generated_token_ids': ANY(SCREAMING_SNAKE_CASE_ )},
],
] , )
@require_tf
def __lowerCamelCase ( self ):
__snake_case = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
__snake_case = text_generator('This is a test' , do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
__snake_case = text_generator(['This is a test', 'This is a second test'] , do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = TextGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
return text_generator, ["This is a test", "Another test"]
def __lowerCamelCase ( self ):
__snake_case = 'Hello I believe in'
__snake_case = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
__snake_case = text_generator(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
__snake_case = text_generator(SCREAMING_SNAKE_CASE_ , stop_sequence=' fe' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'generated_text': 'Hello I believe in fe'}] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = text_generator.model
__snake_case = text_generator.tokenizer
__snake_case = text_generator('This is a test' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
__snake_case = text_generator('This is a test' , return_full_text=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
__snake_case = pipeline(task='text-generation' , model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , return_full_text=SCREAMING_SNAKE_CASE_ )
__snake_case = text_generator('This is a test' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
__snake_case = text_generator('This is a test' , return_full_text=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
__snake_case = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
[{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}, {'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}],
[{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}, {'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__snake_case = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
[{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}, {'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}],
[{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}, {'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}],
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = text_generator('test' , return_full_text=SCREAMING_SNAKE_CASE_ , return_text=SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = text_generator('test' , return_full_text=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = text_generator('test' , return_text=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__snake_case = text_generator('' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , [{'generated_text': ANY(SCREAMING_SNAKE_CASE_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__snake_case = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__snake_case = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
__snake_case = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCamelCase ( self ):
import torch
# Classic `model_kwargs`
__snake_case = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__snake_case = pipe('This is a test' )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__snake_case = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__snake_case = pipe('This is a test' )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__snake_case = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__snake_case = pipe('This is a test' )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def __lowerCamelCase ( self ):
import torch
__snake_case = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCamelCase ( self ):
import torch
__snake_case = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=SCREAMING_SNAKE_CASE_ , top_p=0.5 )
def __lowerCamelCase ( self ):
__snake_case = 'Hello world'
__snake_case = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
__snake_case = logging.get_logger('transformers.generation.tf_utils' )
else:
__snake_case = logging.get_logger('transformers.generation.utils' )
__snake_case = 'Both `max_new_tokens`' # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
__snake_case = text_generator(SCREAMING_SNAKE_CASE_ , max_length=10 , max_new_tokens=1 )
self.assertIn(SCREAMING_SNAKE_CASE_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
__snake_case = text_generator(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 )
self.assertNotIn(SCREAMING_SNAKE_CASE_ , cl.out )
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
__snake_case = text_generator(SCREAMING_SNAKE_CASE_ , max_length=10 )
self.assertNotIn(SCREAMING_SNAKE_CASE_ , cl.out )
| 721 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCamelCase (unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=18 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , ):
__snake_case = size if size is not None else {'height': 18, 'width': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = apply_ocr
def __lowerCamelCase ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCamelCase (lowerCamelCase , unittest.TestCase ):
lowercase__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __lowerCamelCase ( self ):
__snake_case = LayoutLMvaImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'apply_ocr' ) )
def __lowerCamelCase ( self ):
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(encoding.boxes , SCREAMING_SNAKE_CASE_ )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowerCamelCase ( self ):
# with apply_OCR = True
__snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
__snake_case = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__snake_case = Image.open(ds[0]['file'] ).convert('RGB' )
__snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__snake_case = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__snake_case = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE_ )
# with apply_OCR = False
__snake_case = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ )
__snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 345 | 0 |
'''simple docstring'''
import math
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase=0 ) -> str: # a graph with Node 0,1,...,N-1
_lowerCAmelCase = n
_lowerCAmelCase = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # adjacency matrix for weight
_lowerCAmelCase = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = w
def _snake_case ( self ) -> Optional[Any]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_lowerCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
return self.dp[u][v]
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 18 |
class snake_case__ :
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
"""simple docstring"""
a_ : Optional[Any] = name
a_ : Union[str, Any] = val
def __str__( self ) -> Tuple:
"""simple docstring"""
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self , UpperCamelCase_ ) -> Union[str, Any]:
"""simple docstring"""
return self.val < other.val
class snake_case__ :
def __init__( self , UpperCamelCase_ ) -> int:
"""simple docstring"""
a_ : Tuple = {}
a_ : Optional[int] = {}
a_ : Tuple = self.build_heap(UpperCamelCase_ )
def __getitem__( self , UpperCamelCase_ ) -> Any:
"""simple docstring"""
return self.get_value(UpperCamelCase_ )
def A ( self , UpperCamelCase_ ) -> List[Any]:
"""simple docstring"""
return (idx - 1) // 2
def A ( self , UpperCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
return idx * 2 + 1
def A ( self , UpperCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
return idx * 2 + 2
def A ( self , UpperCamelCase_ ) -> List[Any]:
"""simple docstring"""
return self.heap_dict[key]
def A ( self , UpperCamelCase_ ) -> Optional[int]:
"""simple docstring"""
a_ : Tuple = len(UpperCamelCase_ ) - 1
a_ : Union[str, Any] = self.get_parent_idx(UpperCamelCase_ )
for idx, i in enumerate(UpperCamelCase_ ):
a_ : Tuple = idx
a_ : Optional[int] = i.val
for i in range(UpperCamelCase_ , -1 , -1 ):
self.sift_down(UpperCamelCase_ , UpperCamelCase_ )
return array
def A ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
"""simple docstring"""
while True:
a_ : Tuple = self.get_left_child_idx(UpperCamelCase_ ) # noqa: E741
a_ : Optional[Any] = self.get_right_child_idx(UpperCamelCase_ )
a_ : Union[str, Any] = idx
if l < len(UpperCamelCase_ ) and array[l] < array[idx]:
a_ : int = l
if r < len(UpperCamelCase_ ) and array[r] < array[smallest]:
a_ : Optional[int] = r
if smallest != idx:
a_ , a_ : Optional[int] = array[smallest], array[idx]
(
(
a_
) , (
a_
) ,
) : Tuple = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
a_ : List[str] = smallest
else:
break
def A ( self , UpperCamelCase_ ) -> Any:
"""simple docstring"""
a_ : Union[str, Any] = self.get_parent_idx(UpperCamelCase_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
a_ , a_ : Tuple = self.heap[idx], self.heap[p]
a_ , a_ : Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
a_ : Dict = p
a_ : Tuple = self.get_parent_idx(UpperCamelCase_ )
def A ( self ) -> Any:
"""simple docstring"""
return self.heap[0]
def A ( self ) -> str:
"""simple docstring"""
a_ , a_ : Any = self.heap[-1], self.heap[0]
a_ , a_ : List[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
a_ : Any = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def A ( self , UpperCamelCase_ ) -> int:
"""simple docstring"""
self.heap.append(UpperCamelCase_ )
a_ : List[Any] = len(self.heap ) - 1
a_ : Optional[int] = node.val
self.sift_up(len(self.heap ) - 1 )
def A ( self ) -> Any:
"""simple docstring"""
return len(self.heap ) == 0
def A ( self , UpperCamelCase_ , UpperCamelCase_ ) -> int:
"""simple docstring"""
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
a_ : str = new_value
a_ : Optional[Any] = new_value
self.sift_up(self.idx_of_element[node] )
SCREAMING_SNAKE_CASE : int = Node("R", -1)
SCREAMING_SNAKE_CASE : List[str] = Node("B", 6)
SCREAMING_SNAKE_CASE : Optional[int] = Node("A", 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = Node("X", 1)
SCREAMING_SNAKE_CASE : Dict = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
SCREAMING_SNAKE_CASE : Optional[int] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 419 | 0 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def snake_case ( lowerCAmelCase_ = 8 ) -> str:
_snake_case = ascii_letters + digits + punctuation
return "".join(secrets.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(lowerCAmelCase_ )
_snake_case = i // 3
_snake_case = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_snake_case = (
chars_incl
+ random(lowerCAmelCase_ , quotient + remainder )
+ random(lowerCAmelCase_ , lowerCAmelCase_ )
+ random(lowerCAmelCase_ , lowerCAmelCase_ )
)
_snake_case = list(lowerCAmelCase_ )
shuffle(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
# random is a generalised function for letters, characters and numbers
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
return "".join(secrets.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
pass # Put your code here...
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
pass # Put your code here...
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
pass # Put your code here...
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = 8 ) -> bool:
if len(lowerCAmelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
_snake_case = any(char in ascii_uppercase for char in password )
_snake_case = any(char in ascii_lowercase for char in password )
_snake_case = any(char in digits for char in password )
_snake_case = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def snake_case ( ) -> Tuple:
_snake_case = int(input('''Please indicate the max length of your password: ''' ).strip() )
_snake_case = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''' , password_generator(lowerCAmelCase_ ) )
print(
'''Alternative Password generated:''' , alternative_password_generator(lowerCAmelCase_ , lowerCAmelCase_ ) , )
print('''[If you are thinking of using this passsword, You better save it.]''' )
if __name__ == "__main__":
main()
| 404 |
"""simple docstring"""
from math import isqrt
def snake_case ( lowerCAmelCase_ ) -> bool:
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowerCAmelCase_ ) + 1 ) )
def snake_case ( lowerCAmelCase_ = 10**6 ) -> int:
_snake_case = 0
_snake_case = 1
_snake_case = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowerCAmelCase_ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"{solution() = }")
| 404 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_lowerCAmelCase = """true"""
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=8_2 ,_lowerCAmelCase=1_6 ):
'''simple docstring'''
set_seed(4_2 )
A_ : List[Any] = RegressionModel()
A_ : int = deepcopy(_lowerCAmelCase )
A_ : Union[str, Any] = RegressionDataset(length=_lowerCAmelCase )
A_ : Union[str, Any] = DataLoader(_lowerCAmelCase ,batch_size=_lowerCAmelCase )
model.to(accelerator.device )
A_ , A_ : int = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
return model, ddp_model, dataloader
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=False ):
'''simple docstring'''
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
A_ : Tuple = load_dataset("""glue""" ,"""mrpc""" ,split="""validation""" )
def tokenize_function(_lowerCAmelCase ):
A_ : Tuple = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_lowerCAmelCase ,max_length=_lowerCAmelCase )
return outputs
with accelerator.main_process_first():
A_ : List[Any] = dataset.map(
_lowerCAmelCase ,batched=_lowerCAmelCase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
A_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_lowerCAmelCase ):
if use_longest:
return tokenizer.pad(_lowerCAmelCase ,padding="""longest""" ,return_tensors="""pt""" )
return tokenizer.pad(_lowerCAmelCase ,padding="""max_length""" ,max_length=1_2_8 ,return_tensors="""pt""" )
return DataLoader(_lowerCAmelCase ,shuffle=_lowerCAmelCase ,collate_fn=_lowerCAmelCase ,batch_size=1_6 )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : int = Accelerator(dispatch_batches=_lowerCAmelCase ,split_batches=_lowerCAmelCase )
A_ : List[Any] = get_dataloader(_lowerCAmelCase ,not dispatch_batches )
A_ : Any = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" ,return_dict=_lowerCAmelCase )
A_ , A_ : int = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : Optional[Any] = []
for batch in dataloader:
A_ , A_ : List[str] = batch.values()
with torch.no_grad():
A_ : Tuple = model(_lowerCAmelCase )
A_ , A_ : List[str] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ : int = [], []
for logit, targ in logits_and_targets:
logits.append(_lowerCAmelCase )
targs.append(_lowerCAmelCase )
A_ , A_ : Optional[int] = torch.cat(_lowerCAmelCase ), torch.cat(_lowerCAmelCase )
return logits, targs
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=8_2 ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=1_6 ):
'''simple docstring'''
A_ , A_ , A_ : Union[str, Any] = get_basic_setup(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
A_ , A_ : Union[str, Any] = generate_predictions(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
assert (
len(_lowerCAmelCase ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_lowerCAmelCase )}"""
def _lowerCAmelCase ( _lowerCAmelCase = False ,_lowerCAmelCase = False ):
'''simple docstring'''
A_ : Any = evaluate.load("""glue""" ,"""mrpc""" )
A_ , A_ : Optional[Any] = get_mrpc_setup(_lowerCAmelCase ,_lowerCAmelCase )
# First do baseline
A_ , A_ , A_ : Any = setup["""no"""]
model.to(_lowerCAmelCase )
model.eval()
for batch in dataloader:
batch.to(_lowerCAmelCase )
with torch.inference_mode():
A_ : List[str] = model(**_lowerCAmelCase )
A_ : int = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_lowerCAmelCase ,references=batch["""labels"""] )
A_ : List[str] = metric.compute()
# Then do distributed
A_ , A_ , A_ : int = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A_ : Optional[Any] = model(**_lowerCAmelCase )
A_ : Any = outputs.logits.argmax(dim=-1 )
A_ : int = batch["""labels"""]
A_ , A_ : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_lowerCAmelCase ,references=_lowerCAmelCase )
A_ : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def _lowerCAmelCase ( ):
'''simple docstring'''
A_ : str = Accelerator(split_batches=_lowerCAmelCase ,dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_lowerCAmelCase ,_lowerCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A_ : Optional[int] = Accelerator(split_batches=_lowerCAmelCase ,dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_lowerCAmelCase ,9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
A_ : Union[str, Any] = Accelerator()
test_torch_metrics(_lowerCAmelCase ,5_1_2 )
accelerator.state._reset_state()
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 569 |
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ) or not number >= 1:
raise ValueError(
"""starting number must be
and integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
A_ : int = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_lowerCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 569 | 1 |
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __lowerCAmelCase ):
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', __lowerCAmelCase, )
| 715 |
from __future__ import annotations
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> list[int]:
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Union[str, Any] = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCamelCase_ : List[Any] = i + 1
else:
lowerCamelCase_ : Any = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 144 | 0 |
from __future__ import annotations
def A_ ( A__ , A__ , A__ , A__ ) -> None:
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
a__ , a__ : Any = array[indexa], array[indexa]
def A_ ( A__ , A__ , A__ , A__ ) -> None:
if length > 1:
a__ : int = int(length / 2 )
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def A_ ( A__ , A__ , A__ , A__ ) -> None:
if length > 1:
a__ : Optional[int] = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
if __name__ == "__main__":
lowercase : int = input("""Enter numbers separated by a comma:\n""").strip()
lowercase : Optional[int] = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 302 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase : Any = logging.get_logger(__name__)
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *lowercase , **lowercase) -> None:
'''simple docstring'''
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase)
| 302 | 1 |
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase = 100 ):
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : str = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 705 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 40 | 0 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
# load base model
_lowerCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
_lowerCamelCase : List[str] = load_file(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Union[str, Any] = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
_lowerCamelCase : List[Any] = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
_lowerCamelCase : Union[str, Any] = pipeline.text_encoder
else:
_lowerCamelCase : Union[str, Any] = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
_lowerCamelCase : Union[str, Any] = pipeline.unet
# find the target layer
_lowerCamelCase : List[Any] = layer_infos.pop(0 )
while len(SCREAMING_SNAKE_CASE_ ) > -1:
try:
_lowerCamelCase : Optional[int] = curr_layer.__getattr__(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
_lowerCamelCase : Optional[Any] = layer_infos.pop(0 )
elif len(SCREAMING_SNAKE_CASE_ ) == 0:
break
except Exception:
if len(SCREAMING_SNAKE_CASE_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
_lowerCamelCase : Tuple = layer_infos.pop(0 )
_lowerCamelCase : Dict = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(SCREAMING_SNAKE_CASE_ )
else:
pair_keys.append(SCREAMING_SNAKE_CASE_ )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
_lowerCamelCase : Any = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
_lowerCamelCase : Dict = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).unsqueeze(2 ).unsqueeze(3 )
else:
_lowerCamelCase : Optional[Any] = state_dict[pair_keys[0]].to(torch.floataa )
_lowerCamelCase : Union[str, Any] = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# update visited list
for item in pair_keys:
visited.append(SCREAMING_SNAKE_CASE_ )
return pipeline
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
lowercase__ = parser.parse_args()
lowercase__ = args.base_model_path
lowercase__ = args.checkpoint_path
lowercase__ = args.dump_path
lowercase__ = args.lora_prefix_unet
lowercase__ = args.lora_prefix_text_encoder
lowercase__ = args.alpha
lowercase__ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowercase__ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 630 |
from __future__ import annotations
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 287 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _lowercase ( _lowerCamelCase ):
_UpperCamelCase = """unispeech-sat"""
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1_500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ):
super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
A : List[str] = hidden_size
A : List[str] = feat_extract_norm
A : Optional[int] = feat_extract_activation
A : Any = list(A__ )
A : Optional[int] = list(A__ )
A : Union[str, Any] = list(A__ )
A : str = conv_bias
A : Optional[int] = num_conv_pos_embeddings
A : List[Any] = num_conv_pos_embedding_groups
A : Union[str, Any] = len(self.conv_dim )
A : Dict = num_hidden_layers
A : Tuple = intermediate_size
A : int = hidden_act
A : Optional[Any] = num_attention_heads
A : List[str] = hidden_dropout
A : str = attention_dropout
A : str = activation_dropout
A : Optional[Any] = feat_proj_dropout
A : Tuple = final_dropout
A : str = layerdrop
A : Dict = layer_norm_eps
A : Optional[Any] = initializer_range
A : Dict = vocab_size
A : int = num_clusters
A : Dict = do_stable_layer_norm
A : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A : str = apply_spec_augment
A : str = mask_time_prob
A : Union[str, Any] = mask_time_length
A : Optional[int] = mask_time_min_masks
A : List[Any] = mask_feature_prob
A : Union[str, Any] = mask_feature_length
A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
A : Optional[int] = num_codevectors_per_group
A : Any = num_codevector_groups
A : List[str] = contrastive_logits_temperature
A : Union[str, Any] = feat_quantizer_dropout
A : List[Any] = num_negatives
A : List[Any] = codevector_dim
A : List[str] = proj_codevector_dim
A : Tuple = diversity_loss_weight
# ctc loss
A : Optional[Any] = ctc_loss_reduction
A : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A : Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A : List[str] = list(A__ )
A : str = list(A__ )
A : Union[str, Any] = list(A__ )
A : Dict = xvector_output_dim
@property
def snake_case ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 700 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCamelCase( UpperCamelCase__ : Dict ) -> Union[str, Any]:
A : Optional[Any] = fname.split(os.path.sep )[-1]
return re.search(R'''^(.*)_\d+\.jpg$''' , UpperCamelCase__ ).groups()[0]
class _lowercase ( a ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
A : str = file_names
A : Optional[int] = image_transform
A : str = label_to_id
def __len__( self ):
return len(self.file_names )
def __getitem__( self , _UpperCAmelCase ):
A : int = self.file_names[idx]
A : int = PIL.Image.open(_UpperCAmelCase )
A : str = raw_image.convert('''RGB''' )
if self.image_transform is not None:
A : Dict = self.image_transform(_UpperCAmelCase )
A : Tuple = extract_label(_UpperCAmelCase )
if self.label_to_id is not None:
A : Optional[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCamelCase( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ) -> Any:
# Initialize accelerator
if args.with_tracking:
A : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
A : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : List[str] = config['''lr''']
A : int = int(config['''num_epochs'''] )
A : List[str] = int(config['''seed'''] )
A : Any = int(config['''batch_size'''] )
A : List[str] = config['''image_size''']
if not isinstance(UpperCamelCase__ , (list, tuple) ):
A : List[str] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
A : List[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A : Optional[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A : Optional[Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A : Any = os.path.split(UpperCamelCase__ )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Grab all the image filenames
A : int = [os.path.join(args.data_dir , UpperCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
A : int = [extract_label(UpperCamelCase__ ) for fname in file_names]
A : str = list(set(UpperCamelCase__ ) )
id_to_label.sort()
A : Dict = {lbl: i for i, lbl in enumerate(UpperCamelCase__ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase__ )
torch.manual_seed(UpperCamelCase__ )
torch.cuda.manual_seed_all(UpperCamelCase__ )
# Split our filenames between train and validation
A : Dict = np.random.permutation(len(UpperCamelCase__ ) )
A : str = int(0.8 * len(UpperCamelCase__ ) )
A : Tuple = random_perm[:cut]
A : List[Any] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A : Any = Compose([RandomResizedCrop(UpperCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] )
A : List[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# For evaluation, we use a deterministic Resize
A : Optional[Any] = Compose([Resize(UpperCamelCase__ ), ToTensor()] )
A : List[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# Instantiate dataloaders.
A : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
A : Tuple = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : Union[str, Any] = create_model('''resnet50d''' , pretrained=UpperCamelCase__ , num_classes=len(UpperCamelCase__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A : Optional[int] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A : Union[str, Any] = False
for param in model.get_classifier().parameters():
A : Any = True
# We normalize the batches of images to be a bit faster.
A : Dict = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
A : str = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
A : List[Any] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
A : int = OneCycleLR(optimizer=UpperCamelCase__ , max_lr=UpperCamelCase__ , epochs=UpperCamelCase__ , steps_per_epoch=len(UpperCamelCase__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A, A, A, A, A : Optional[int] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
A : Dict = 0
# We also need to keep track of the starting epoch so files are named properly
A : str = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
A : Optional[int] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
A : Dict = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
A : Optional[int] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
A : Optional[int] = os.path.splitext(UpperCamelCase__ )[0]
if "epoch" in training_difference:
A : Tuple = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
A : Union[str, Any] = None
else:
A : int = int(training_difference.replace('''step_''' , '''''' ) )
A : str = resume_step // len(UpperCamelCase__ )
resume_step -= starting_epoch * len(UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
if args.with_tracking:
A : int = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
A : str = accelerator.skip_first_batches(UpperCamelCase__ , UpperCamelCase__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
A : int = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
A : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
A : Optional[int] = (batch['''image'''] - mean) / std
A : int = model(UpperCamelCase__ )
A : List[Any] = torch.nn.functional.cross_entropy(UpperCamelCase__ , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A : List[Any] = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
A : Dict = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
model.eval()
A : Optional[int] = 0
A : int = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
A : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
A : Any = (batch['''image'''] - mean) / std
with torch.no_grad():
A : Union[str, Any] = model(UpperCamelCase__ )
A : Tuple = outputs.argmax(dim=-1 )
A, A : List[Any] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
A : Any = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
A : str = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 100 * eval_metric,
'''train_loss''': total_loss.item() / len(UpperCamelCase__ ),
'''epoch''': epoch,
} , step=UpperCamelCase__ , )
if checkpointing_steps == "epoch":
A : Dict = F'''epoch_{epoch}'''
if args.output_dir is not None:
A : Dict = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
if args.with_tracking:
accelerator.end_training()
def _lowerCamelCase( ) -> int:
A : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=UpperCamelCase__ , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=UpperCamelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=UpperCamelCase__ , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
A : Tuple = parser.parse_args()
A : List[str] = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 224}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 537 | 0 |
import argparse
from collections import defaultdict
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Optional[int] = f'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(__UpperCamelCase , 'r' ) as f:
__lowerCamelCase : List[str] = f.readlines()
__lowerCamelCase : Any = f'class {class_name}('
__lowerCamelCase : Optional[int] = f'{4 * " "}def {test_name}('
__lowerCamelCase : Optional[Any] = f'{8 * " "}{correct_line.split()[0]}'
__lowerCamelCase : Tuple = f'{16 * " "}{correct_line.split()[0]}'
__lowerCamelCase : Any = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : int = False
__lowerCamelCase : Tuple = 0
__lowerCamelCase : List[str] = 0
__lowerCamelCase : str = []
for line in lines:
if line.startswith(__UpperCamelCase ):
__lowerCamelCase : str = True
elif in_class and line.startswith(__UpperCamelCase ):
__lowerCamelCase : Optional[int] = True
elif in_class and in_func and (line.startswith(__UpperCamelCase ) or line.startswith(__UpperCamelCase )):
__lowerCamelCase : Optional[Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
__lowerCamelCase : Union[str, Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__lowerCamelCase : List[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'{spaces * " "}{correct_line}' )
__lowerCamelCase : List[str] = False
else:
new_lines.append(__UpperCamelCase )
with open(__UpperCamelCase , 'w' ) as f:
for line in new_lines:
f.write(__UpperCamelCase )
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
if fail is not None:
with open(__UpperCamelCase , 'r' ) as f:
__lowerCamelCase : List[str] = {l.strip() for l in f.readlines()}
else:
__lowerCamelCase : Optional[int] = None
with open(__UpperCamelCase , 'r' ) as f:
__lowerCamelCase : Dict = f.readlines()
__lowerCamelCase : Dict = defaultdict(__UpperCamelCase )
for line in correct_lines:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
lowercase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 669 |
lowercase__ : Optional[int] = 9.8_0665
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = g) -> float:
if fluid_density <= 0:
raise ValueError("Impossible fluid density")
if volume < 0:
raise ValueError("Impossible Object volume")
if gravity <= 0:
raise ValueError("Impossible Gravity")
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 515 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['MobileViTFeatureExtractor']
UpperCamelCase__ = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class a ( lowercase ):
UpperCamelCase : Dict = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **UpperCamelCase_ ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCAmelCase__ : int = deprecated_arg[3:]
UpperCAmelCase__ : Tuple = not kwargs.pop(UpperCamelCase_ )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
UpperCAmelCase__ : Dict = kwargs.pop('tpu_name' , self.tpu_name )
UpperCAmelCase__ : Tuple = kwargs.pop('device_idx' , self.device_idx )
UpperCAmelCase__ : List[str] = kwargs.pop('eager_mode' , self.eager_mode )
UpperCAmelCase__ : Any = kwargs.pop('use_xla' , self.use_xla )
super().__init__(**UpperCamelCase_ )
UpperCamelCase : str = field(
default=lowercase , metadata={"""help""": """Name of TPU"""} , )
UpperCamelCase : int = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
UpperCamelCase : bool = field(default=lowercase , metadata={"""help""": """Benchmark models in eager model."""} )
UpperCamelCase : bool = field(
default=lowercase , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def __snake_case ( self ):
requires_backends(self , ['tf'] )
UpperCAmelCase__ : Optional[Any] = None
if self.tpu:
try:
if self.tpu_name:
UpperCAmelCase__ : str = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
UpperCAmelCase__ : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
UpperCAmelCase__ : Tuple = None
return tpu
@cached_property
def __snake_case ( self ):
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
UpperCAmelCase__ : Union[str, Any] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
UpperCAmelCase__ : Any = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
UpperCAmelCase__ : Any = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def __snake_case ( self ):
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def __snake_case ( self ):
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def __snake_case ( self ):
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def __snake_case ( self ):
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __snake_case ( self ):
return self.n_gpu > 0
| 254 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase_ : str = logging.getLogger(__name__)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
return (preds == labels).mean()
@dataclass
class __A :
UpperCamelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __A :
UpperCamelCase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
UpperCamelCase = field(metadata={"""help""": """Should contain the data files for the task."""} )
UpperCamelCase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowerCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__ : Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__magic_name__ , __magic_name__ , __magic_name__ : Any =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
try:
__magic_name__ : List[str] =processors[data_args.task_name]()
__magic_name__ : str =processor.get_labels()
__magic_name__ : Tuple =len(lowerCamelCase )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ : Optional[int] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__magic_name__ : Dict =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__magic_name__ : str =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
__magic_name__ : Optional[Any] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__magic_name__ : Optional[int] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase ) -> Dict:
__magic_name__ : List[str] =np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase , p.label_ids )}
# Data collator
__magic_name__ : Union[str, Any] =DataCollatorWithPadding(lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__magic_name__ : List[str] =Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , compute_metrics=lowerCamelCase , data_collator=lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__magic_name__ : Tuple ={}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__magic_name__ : List[Any] =trainer.evaluate()
__magic_name__ : Optional[Any] =os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase , lowerCamelCase )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase )
return results
def lowerCAmelCase_ ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 21 |
def SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> bool:
__lowercase = len(snake_case ) + 1
__lowercase = len(snake_case ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__lowercase = [[0 for i in range(snake_case )] for j in range(snake_case )]
# since string of zero length match pattern of zero length
__lowercase = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , snake_case ):
__lowercase = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , snake_case ):
__lowercase = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , snake_case ):
for j in range(1 , snake_case ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__lowercase = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__lowercase = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__lowercase = dp[i - 1][j]
else:
__lowercase = 0
else:
__lowercase = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
SCREAMING_SNAKE_CASE_ : Any = '''aab'''
SCREAMING_SNAKE_CASE_ : str = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 375 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """dandelin/vilt-b32-finetuned-vqa"""
UpperCamelCase__ = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
UpperCamelCase__ = """image_qa"""
UpperCamelCase__ = AutoProcessor
UpperCamelCase__ = AutoModelForVisualQuestionAnswering
UpperCamelCase__ = ["""image""", """text"""]
UpperCamelCase__ = ["""text"""]
def __init__( self: int , *__lowerCamelCase: int , **__lowerCamelCase: Tuple ):
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: "Image" , __lowerCamelCase: str ):
'''simple docstring'''
return self.pre_processor(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
def UpperCAmelCase_ ( self: int , __lowerCamelCase: Tuple ):
'''simple docstring'''
with torch.no_grad():
return self.model(**__lowerCamelCase ).logits
def UpperCAmelCase_ ( self: str , __lowerCamelCase: str ):
'''simple docstring'''
UpperCamelCase__: List[str] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 221 |
from __future__ import annotations
def lowerCAmelCase_ ( A_ ,A_ ,A_):
if (voltage, current, resistance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if resistance < 0:
raise ValueError("Resistance cannot be negative")
if voltage == 0:
return {"voltage": float(current * resistance)}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 221 | 1 |
import math
import sys
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if number != int(SCREAMING_SNAKE_CASE ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
A_ = [-1] * (number + 1)
A_ = 0
for i in range(1 , number + 1 ):
A_ = sys.maxsize
A_ = int(math.sqrt(SCREAMING_SNAKE_CASE ) )
for j in range(1 , root + 1 ):
A_ = 1 + answers[i - (j**2)]
A_ = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 203 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowercase = """pt"""
elif is_tf_available():
__lowercase = """tf"""
else:
__lowercase = """jax"""
class _lowercase ( __lowerCamelCase,unittest.TestCase ):
_lowercase : Dict = PerceiverTokenizer
_lowercase : Dict = False
def UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
super().setUp()
A_ = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def UpperCamelCase ( self : List[str] , **lowerCamelCase__ : str ) -> PerceiverTokenizer:
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCamelCase ( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : List[str]=2_0 , lowerCamelCase__ : Union[str, Any]=5 ) -> Tuple[str, list]:
"""simple docstring"""
A_ = []
for i in range(len(lowerCamelCase__ ) ):
try:
A_ = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
A_ = list(filter(lambda lowerCamelCase__ : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowerCamelCase__ ) )
A_ = list(filter(lambda lowerCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCamelCase__ ) , lowerCamelCase__ ) )
if max_length is not None and len(lowerCamelCase__ ) > max_length:
A_ = toks[:max_length]
if min_length is not None and len(lowerCamelCase__ ) < min_length and len(lowerCamelCase__ ) > 0:
while len(lowerCamelCase__ ) < min_length:
A_ = toks + toks
# toks_str = [t[1] for t in toks]
A_ = [t[0] for t in toks]
# Ensure consistency
A_ = tokenizer.decode(lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
if " " not in output_txt and len(lowerCamelCase__ ) > 1:
A_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase__ )
)
if with_prefix_space:
A_ = ''' ''' + output_txt
A_ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
return output_txt, output_ids
def UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
A_ = self.perceiver_tokenizer
A_ = '''Unicode €.'''
A_ = tokenizer(lowerCamelCase__ )
A_ = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase__ )
# decoding
A_ = tokenizer.decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , '''[CLS]Unicode €.[SEP]''' )
A_ = tokenizer('''e è é ê ë''' )
A_ = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase__ )
# decoding
A_ = tokenizer.decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.perceiver_tokenizer
A_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
A_ = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
A_ = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
if FRAMEWORK != "jax":
A_ = list(batch.input_ids.numpy()[0] )
else:
A_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
A_ = self.perceiver_tokenizer
A_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
A_ = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCamelCase__ )
self.assertIn('''attention_mask''' , lowerCamelCase__ )
self.assertNotIn('''decoder_input_ids''' , lowerCamelCase__ )
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase__ )
def UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.perceiver_tokenizer
A_ = [
'''Summary of the text.''',
'''Another summary.''',
]
A_ = tokenizer(
text_target=lowerCamelCase__ , max_length=3_2 , padding='''max_length''' , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
A_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
A_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
A_ = tempfile.mkdtemp()
A_ = ''' He is very happy, UNwant\u00E9d,running'''
A_ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
A_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
A_ = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
shutil.rmtree(lowerCamelCase__ )
A_ = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
A_ = tempfile.mkdtemp()
A_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
A_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
A_ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
A_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
A_ = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
A_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(lowerCamelCase__ )
def UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
A_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
A_ = json.load(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
A_ = json.load(lowerCamelCase__ )
A_ = [F"<extra_id_{i}>" for i in range(1_2_5 )]
A_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
A_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCamelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A_ = tokenizer_class.from_pretrained(
lowerCamelCase__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCamelCase__ )]
A_ = tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
A_ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
pass
def UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
def UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
A_ = self.get_tokenizers(fast=lowerCamelCase__ , do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
A_ = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
A_ = tokenizer.convert_tokens_to_string(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
| 203 | 1 |
def _A ( __snake_case :int , __snake_case :int ) -> int:
"""simple docstring"""
while second != 0:
__SCREAMING_SNAKE_CASE = first & second
first ^= second
__SCREAMING_SNAKE_CASE = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : Any = int(input('Enter the first number: ').strip())
_snake_case : str = int(input('Enter the second number: ').strip())
print(F"""{add(first, second) = }""")
| 214 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""facebook/bart-large-mnli"""
SCREAMING_SNAKE_CASE__ =(
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
SCREAMING_SNAKE_CASE__ ="""text_classifier"""
SCREAMING_SNAKE_CASE__ =AutoTokenizer
SCREAMING_SNAKE_CASE__ =AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE__ =["""text""", ["""text"""]]
SCREAMING_SNAKE_CASE__ =["""text"""]
def __lowerCAmelCase ( self ) -> Dict:
super().setup()
__SCREAMING_SNAKE_CASE = self.model.config
__SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
__SCREAMING_SNAKE_CASE = int(_a )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def __lowerCAmelCase ( self, _a, _a ) -> List[str]:
__SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(_a ), [f'''This example is {label}''' for label in labels], return_tensors="pt", padding="max_length", )
def __lowerCAmelCase ( self, _a ) -> Tuple:
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 214 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def lowerCamelCase ( _snake_case : Tuple ,_snake_case : Optional[Any]=1_000 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase__ = n - 1
lowercase__ = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
lowercase__ = 0
while count < prec:
lowercase__ = random.randint(2 ,n - 1 )
lowercase__ = bin_exp_mod(_snake_case ,_snake_case ,_snake_case )
if b != 1:
lowercase__ = True
for _ in range(_snake_case ):
if b == n - 1:
lowercase__ = False
break
lowercase__ = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 267 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case (UpperCamelCase , UpperCamelCase ):
@register_to_config
def __init__( self ,UpperCAmelCase_ = 768 ,) -> List[Any]:
super().__init__()
lowercase__ = nn.Parameter(torch.zeros(1 ,UpperCAmelCase_ ) )
lowercase__ = nn.Parameter(torch.ones(1 ,UpperCAmelCase_ ) )
def _a ( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,) -> Any:
lowercase__ = nn.Parameter(self.mean.to(UpperCAmelCase_ ).to(UpperCAmelCase_ ) )
lowercase__ = nn.Parameter(self.std.to(UpperCAmelCase_ ).to(UpperCAmelCase_ ) )
return self
def _a ( self ,UpperCAmelCase_ ) -> Tuple:
lowercase__ = (embeds - self.mean) * 1.0 / self.std
return embeds
def _a ( self ,UpperCAmelCase_ ) -> List[str]:
lowercase__ = (embeds * self.std) + self.mean
return embeds
| 267 | 1 |
'''simple docstring'''
from statistics import mean
import numpy as np
def A ( A_ : Tuple , A_ : List[Any] , A_ : str , A_ : Any ):
snake_case : List[Any] = 0
# Number of processes finished
snake_case : List[str] = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
snake_case : Dict = [0] * no_of_process
# List to include calculation results
snake_case : Dict = [0] * no_of_process
# Sort by arrival time.
snake_case : Tuple = [burst_time[i] for i in np.argsort(_SCREAMING_SNAKE_CASE )]
snake_case : Tuple = [process_name[i] for i in np.argsort(_SCREAMING_SNAKE_CASE )]
arrival_time.sort()
while no_of_process > finished_process_count:
snake_case : List[Any] = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
snake_case : Tuple = arrival_time[i]
snake_case : Any = 0
# Index showing the location of the process being performed
snake_case : Union[str, Any] = 0
# Saves the current response ratio.
snake_case : Dict = 0
for i in range(0 , _SCREAMING_SNAKE_CASE ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
snake_case : Optional[Any] = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
snake_case : Optional[Any] = temp
snake_case : Optional[int] = i
# Calculate the turn around time
snake_case : Dict = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
snake_case : Union[str, Any] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def A ( A_ : Any , A_ : int , A_ : int , A_ : List[str] ):
snake_case : Tuple = [0] * no_of_process
for i in range(0 , _SCREAMING_SNAKE_CASE ):
snake_case : Tuple = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
UpperCAmelCase = 5
UpperCAmelCase = ["A", "B", "C", "D", "E"]
UpperCAmelCase = [1, 2, 3, 4, 5]
UpperCAmelCase = [1, 2, 3, 4, 5]
UpperCAmelCase = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
UpperCAmelCase = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 701 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase = {
"Salesforce/codegen-350M-mono": 2_048,
}
class a ( __magic_name__ ):
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['''input_ids''', '''attention_mask''']
_snake_case = CodeGenTokenizer
def __init__( self : Any, SCREAMING_SNAKE_CASE_ : Dict=None, SCREAMING_SNAKE_CASE_ : Optional[Any]=None, SCREAMING_SNAKE_CASE_ : Optional[Any]=None, SCREAMING_SNAKE_CASE_ : Optional[int]="<|endoftext|>", SCREAMING_SNAKE_CASE_ : List[Any]="<|endoftext|>", SCREAMING_SNAKE_CASE_ : List[str]="<|endoftext|>", SCREAMING_SNAKE_CASE_ : int=False, **SCREAMING_SNAKE_CASE_ : Optional[int], ):
super().__init__(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
if kwargs.pop('''add_bos_token''', SCREAMING_SNAKE_CASE_ ):
snake_case : Dict = kwargs.pop('''name_or_path''', '''''' )
raise ValueError(
'''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
'''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
'''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
''' so that the fast tokenizer works correctly.''' )
snake_case : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''', SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
snake_case : Optional[Any] = getattr(SCREAMING_SNAKE_CASE_, pre_tok_state.pop('''type''' ) )
snake_case : Union[str, Any] = add_prefix_space
snake_case : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
snake_case : int = add_prefix_space
def __snake_case ( self : List[Any], *SCREAMING_SNAKE_CASE_ : Tuple, **SCREAMING_SNAKE_CASE_ : int ):
snake_case : Dict = kwargs.get('''is_split_into_words''', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str], *SCREAMING_SNAKE_CASE_ : str, **SCREAMING_SNAKE_CASE_ : Optional[int] ):
snake_case : Dict = kwargs.get('''is_split_into_words''', SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple, SCREAMING_SNAKE_CASE_ : str, SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
snake_case : Optional[int] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Dict, SCREAMING_SNAKE_CASE_ : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], SCREAMING_SNAKE_CASE_ : bool = False, SCREAMING_SNAKE_CASE_ : bool = None, SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None, **SCREAMING_SNAKE_CASE_ : Union[str, Any], ):
snake_case : Dict = super().decode(
token_ids=SCREAMING_SNAKE_CASE_, skip_special_tokens=SCREAMING_SNAKE_CASE_, clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
if truncate_before_pattern is not None and len(SCREAMING_SNAKE_CASE_ ) > 0:
snake_case : Optional[int] = self.truncate(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return decoded_text
def __snake_case ( self : int, SCREAMING_SNAKE_CASE_ : Optional[Any], SCREAMING_SNAKE_CASE_ : Tuple ):
def find_re(SCREAMING_SNAKE_CASE_ : Optional[int], SCREAMING_SNAKE_CASE_ : Dict, SCREAMING_SNAKE_CASE_ : Optional[Any] ):
snake_case : Optional[Any] = pattern.search(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return m.start() if m else -1
snake_case : Union[str, Any] = [re.compile(SCREAMING_SNAKE_CASE_, re.MULTILINE ) for pattern in truncate_before_pattern]
snake_case : Union[str, Any] = list(re.finditer('''^print''', SCREAMING_SNAKE_CASE_, re.MULTILINE ) )
if len(SCREAMING_SNAKE_CASE_ ) > 1:
snake_case : Tuple = completion[: prints[1].start()]
snake_case : List[str] = list(re.finditer('''^def''', SCREAMING_SNAKE_CASE_, re.MULTILINE ) )
if len(SCREAMING_SNAKE_CASE_ ) > 1:
snake_case : Optional[Any] = completion[: defs[1].start()]
snake_case : Tuple = 0
snake_case : List[Any] = [
pos for pos in [find_re(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) for terminal in terminals] if pos != -1
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
return completion[: min(SCREAMING_SNAKE_CASE_ )]
else:
return completion
| 555 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.