"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
snake_case__ : List[str] = datasets.load_iris()
snake_case__ : Union[str, Any] = np.array(data['''data'''])
snake_case__ : Optional[Any] = np.array(data['''target'''])
snake_case__ : Optional[Any] = data['''target_names''']
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = train_test_split(X, y)
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict ):
return np.linalg.norm(np.array(_snake_case ) - np.array(_snake_case ) )
def _snake_case ( _snake_case : List[str] , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Dict=5 ):
lowerCAmelCase : Optional[Any] = zip(_snake_case , _snake_case )
# List of distances of all points from the point to be classified
lowerCAmelCase : Union[str, Any] = []
for data_point in data:
lowerCAmelCase : Any = euclidean_distance(data_point[0] , _snake_case )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
lowerCAmelCase : str = [i[1] for i in sorted(_snake_case )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
lowerCAmelCase : Optional[Any] = Counter(_snake_case ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
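
# --- Illustrative usage (not part of the original script) ---
# A minimal sketch of how the classifier above could be scored on the held-out
# split produced by train_test_split. `knn_accuracy` is a hypothetical helper
# name introduced here for illustration only.
def knn_accuracy(train_data, train_target, test_data, test_target, k=5):
    """Fraction of test points whose predicted class name matches the true one."""
    correct = 0
    for point, target in zip(test_data, test_target):
        if classifier(train_data, train_target, classes, point, k) == classes[target]:
            correct += 1
    return correct / len(test_data)


# Example: print(knn_accuracy(X_train, y_train, X_test, y_test))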
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
if "model" in orig_key:
lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowercase__ : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowercase__ : Optional[Any] = '''yoso.''' + orig_key
return orig_key
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowercase__ : Tuple = val
lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase )
lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase )
lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
print(model.load_state_dict(__lowerCamelCase ) )
model.eval()
model.save_pretrained(__lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
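
# --- Illustrative follow-up (not part of the original script) ---
# A minimal sketch, assuming a conversion has already been run: reload the
# dumped model and check a forward pass on dummy input ids. The helper name
# and dump path are placeholders introduced here, not part of the original file.
def smoke_test_converted_model(pytorch_dump_path):
    model = YosoForMaskedLM.from_pretrained(pytorch_dump_path)
    model.eval()
    dummy_input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
    with torch.no_grad():
        logits = model(dummy_input_ids).logits
    print(logits.shape)  # (1, sequence_length, vocab_size)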
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import inspect
import unittest
from typing import List, Tuple

from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __lowercase ( snake_case_ : int ) ->Tuple:
'''simple docstring'''
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def __lowercase ( snake_case_ : str ) ->Dict:
'''simple docstring'''
for char in word:
__A : int = ord(snake_case_ )
if not _is_chinese_char(snake_case_ ):
return 0
return 1
def __lowercase ( snake_case_ : List[str] ) ->List[Any]:
'''simple docstring'''
__A : str = set()
for token in tokens:
__A : List[Any] = len(snake_case_ ) > 1 and is_chinese(snake_case_ )
if chinese_word:
word_set.add(snake_case_ )
__A : Any = list(snake_case_ )
return word_list
def __lowercase ( snake_case_ : List[str] ,snake_case_ : set() ) ->Any:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
__A : List[Any] = max([len(snake_case_ ) for w in chinese_word_set] )
__A : List[str] = bert_tokens
__A , __A : Any = 0, len(snake_case_ )
while start < end:
__A : str = True
if is_chinese(bert_word[start] ):
__A : int = min(end - start ,snake_case_ )
for i in range(snake_case_ ,1 ,-1 ):
__A : Any = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
__A : Any = '''##''' + bert_word[j]
__A : Optional[int] = start + i
__A : str = False
break
if single_word:
start += 1
return bert_word
def __lowercase ( snake_case_ : List[str] ,snake_case_ : LTP ,snake_case_ : BertTokenizer ) ->Dict:
'''simple docstring'''
__A : Optional[Any] = []
for i in range(0 ,len(snake_case_ ) ,100 ):
__A : int = ltp_tokenizer.seg(lines[i : i + 100] )[0]
__A : List[Any] = [get_chinese_word(snake_case_ ) for r in res]
ltp_res.extend(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
__A : Any = []
for i in range(0 ,len(snake_case_ ) ,100 ):
__A : Tuple = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=snake_case_ ,truncation=snake_case_ ,max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(snake_case_ ) == len(snake_case_ )
__A : Optional[int] = []
for input_ids, chinese_word in zip(snake_case_ ,snake_case_ ):
__A : List[str] = []
for id in input_ids:
__A : Tuple = bert_tokenizer._convert_id_to_token(snake_case_ )
input_tokens.append(snake_case_ )
__A : Optional[int] = add_sub_symbol(snake_case_ ,snake_case_ )
__A : Optional[Any] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case_ ):
if token[:2] == "##":
__A : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(snake_case_ ) == 1 and _is_chinese_char(ord(snake_case_ ) ):
ref_id.append(snake_case_ )
ref_ids.append(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
return ref_ids
def __lowercase ( snake_case_ : int ) ->List[Any]:
'''simple docstring'''
with open(args.file_name ,'''r''' ,encoding='''utf-8''' ) as f:
__A : List[str] = f.readlines()
__A : Optional[Any] = [line.strip() for line in data if len(snake_case_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__A : str = LTP(args.ltp ) # faster in GPU device
__A : Optional[int] = BertTokenizer.from_pretrained(args.bert )
__A : Optional[Any] = prepare_ref(snake_case_ ,snake_case_ ,snake_case_ )
with open(args.save_path ,'''w''' ,encoding='''utf-8''' ) as f:
__A : int = [json.dumps(snake_case_ ) + '''\n''' for ref in ref_ids]
f.writelines(snake_case_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
a_ = parser.parse_args()
main(args)
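
# --- Illustrative data flow (not part of the original script) ---
# A sketch of what prepare_ref produces, assuming the LTP and BERT resources
# are available locally. For each input line it returns the positions (indices
# into the BERT tokenization, which includes [CLS] at position 0) of subword
# tokens that continue a Chinese whole word and should be re-marked with "##"
# for whole word masking. The sentence and indices below are illustrative
# values, not computed output:
#
#   lines = ["今天天气不错"]
#   ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
#   # e.g. ref_ids == [[2, 4, 6]]  (positions of word-continuation characters)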
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters of all almost equilateral triangles with
    integral side lengths and area whose perimeters do not exceed max_perimeter.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
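
# --- Illustrative cross-check (not part of the original solution) ---
# A brute force over triangles with sides (a, a, a +/- 1), testing for an
# integral area via Heron's formula in exact integer arithmetic: with two
# sides equal to a, 16 * area^2 = p * (p - 2a)^2 * (p - 2c). Only practical
# for small perimeter bounds; for example both brute_force(1000) and
# solution(1000) give 984 (perimeters 16 + 50 + 196 + 722).
from math import isqrt


def brute_force(max_perimeter: int = 1000) -> int:
    total = 0
    for side in range(2, max_perimeter // 3 + 2):
        for base in (side - 1, side + 1):
            perimeter = 2 * side + base
            if perimeter > max_perimeter:
                continue
            # 16 * area^2 from Heron's formula, kept as an exact integer
            sixteen_area_sq = perimeter * (perimeter - 2 * side) ** 2 * (perimeter - 2 * base)
            root = isqrt(sixteen_area_sq)
            if root > 0 and root * root == sixteen_area_sq and root % 4 == 0:
                total += perimeter
    return total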
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase__ ( snake_case_ :Tuple ):
__UpperCAmelCase = botoa.client('''iam''' )
__UpperCAmelCase = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=snake_case_ , AssumeRolePolicyDocument=json.dumps(snake_case_ , indent=2 ) )
__UpperCAmelCase = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=snake_case_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(snake_case_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def lowercase__ ( snake_case_ :List[Any] ):
__UpperCAmelCase = botoa.client('''iam''' )
return iam_client.get_role(RoleName=snake_case_ )["Role"]["Arn"]
def lowercase__ ( ):
__UpperCAmelCase = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , snake_case_ , )
__UpperCAmelCase = None
if credentials_configuration == 0:
__UpperCAmelCase = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
__UpperCAmelCase = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
__UpperCAmelCase = _ask_field('''AWS Access Key ID: ''' )
__UpperCAmelCase = aws_access_key_id
__UpperCAmelCase = _ask_field('''AWS Secret Access Key: ''' )
__UpperCAmelCase = aws_secret_access_key
__UpperCAmelCase = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
__UpperCAmelCase = aws_region
__UpperCAmelCase = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , snake_case_ , )
if role_management == 0:
__UpperCAmelCase = _ask_field('''Enter your IAM role name: ''' )
else:
__UpperCAmelCase = '''accelerate_sagemaker_execution_role'''
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(snake_case_ )
__UpperCAmelCase = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , )
__UpperCAmelCase = None
if is_custom_docker_image:
__UpperCAmelCase = _ask_field('''Enter your Docker image: ''' , lambda snake_case_ : str(snake_case_ ).lower() )
__UpperCAmelCase = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , )
__UpperCAmelCase = None
if is_sagemaker_inputs_enabled:
__UpperCAmelCase = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda snake_case_ : str(snake_case_ ).lower() , )
__UpperCAmelCase = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , )
__UpperCAmelCase = None
if is_sagemaker_metrics_enabled:
__UpperCAmelCase = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda snake_case_ : str(snake_case_ ).lower() , )
__UpperCAmelCase = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
__UpperCAmelCase = {}
__UpperCAmelCase = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
__UpperCAmelCase = '''dynamo_'''
__UpperCAmelCase = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__UpperCAmelCase = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
__UpperCAmelCase = _ask_options(
'''Which mode do you want to use?''' , snake_case_ , lambda snake_case_ : TORCH_DYNAMO_MODES[int(snake_case_ )] , default='''default''' , )
__UpperCAmelCase = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , )
__UpperCAmelCase = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=snake_case_ , error_message='''Please enter yes or no.''' , )
__UpperCAmelCase = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
__UpperCAmelCase = _ask_options(
snake_case_ , snake_case_ , lambda snake_case_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(snake_case_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCAmelCase = _ask_field(snake_case_ , lambda snake_case_ : str(snake_case_ ).lower() , default='''ml.p3.2xlarge''' )
__UpperCAmelCase = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCAmelCase = _ask_field(
'''How many machines do you want use? [1]: ''' , snake_case_ , default=1 , )
__UpperCAmelCase = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=snake_case_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=snake_case_ , use_cpu=snake_case_ , dynamo_config=snake_case_ , eca_instance_type=snake_case_ , profile=snake_case_ , region=snake_case_ , iam_role_name=snake_case_ , mixed_precision=snake_case_ , num_machines=snake_case_ , sagemaker_inputs_file=snake_case_ , sagemaker_metrics_file=snake_case_ , )
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase__ ( snake_case_ :Optional[int] ):
return EnvironmentCommand()
def lowercase__ ( snake_case_ :List[str] ):
return EnvironmentCommand(args.accelerate_config_file )
class _UpperCAmelCase ( _lowerCAmelCase ):
@staticmethod
def a ( _lowercase : ArgumentParser ):
__UpperCAmelCase = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_lowercase )
def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ):
__UpperCAmelCase = accelerate_config_file
def a ( self : Dict ):
__UpperCAmelCase = '''not installed'''
if is_safetensors_available():
import safetensors
__UpperCAmelCase = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
__UpperCAmelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = __UpperCAmelCase = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__UpperCAmelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_lowercase ):
__UpperCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
__UpperCAmelCase = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_lowercase , _lowercase )
else F'''\t{accelerate_config}'''
)
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_torch_available():
import torch
__UpperCAmelCase = torch.__version__
__UpperCAmelCase = torch.cuda.is_available()
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_tf_available():
import tensorflow as tf
__UpperCAmelCase = tf.__version__
try:
# deprecated in v2.1
__UpperCAmelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__UpperCAmelCase = bool(tf.config.list_physical_devices('''GPU''' ) )
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
__UpperCAmelCase = flax.__version__
__UpperCAmelCase = jax.__version__
__UpperCAmelCase = jaxlib.__version__
__UpperCAmelCase = jax.lib.xla_bridge.get_backend().platform
__UpperCAmelCase = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'''{safetensors_version}''',
'''Accelerate version''': F'''{accelerate_version}''',
'''Accelerate config''': F'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
'''Jax version''': F'''{jax_version}''',
'''JaxLib version''': F'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def a ( _lowercase : str ):
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['MobileViTFeatureExtractor']
__A = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''llama'''
lowerCamelCase = ['''past_key_values''']
def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_key_value_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =initializer_range
_lowerCAmelCase =rms_norm_eps
_lowerCAmelCase =pretraining_tp
_lowerCAmelCase =use_cache
_lowerCAmelCase =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowerCAmelCase ( snake_case_ ):
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
with open(UpperCamelCase__ , encoding="utf-8" ) as input_file:
snake_case : int = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
snake_case : List[str] = input_file.read()
snake_case : List[Any] = regexp.search(UpperCamelCase__ )
return match
def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
with open(UpperCamelCase__ , encoding="utf-8" ) as input_file:
snake_case : Optional[int] = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
snake_case : Dict = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
snake_case : str = regexp.finditer(UpperCamelCase__ )
snake_case : Optional[Any] = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : int = Path("./datasets" )
snake_case : Optional[int] = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCamelCase__ ) ):
raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}' )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Any = Path("./datasets" )
snake_case : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCamelCase__ ) ):
raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.' )
"""simple docstring"""
def __lowerCAmelCase ( lowercase : list[int] ) -> float:
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
snake_case : List[str] = sum(lowercase ) / len(lowercase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
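
# --- Illustrative cross-check (not part of the original file) ---
# The same statistic computed with numpy, assuming numpy is installed; handy
# as a sanity check against the pure-Python version above.
import numpy as np


def mean_absolute_deviation_np(nums: list[int]) -> float:
    arr = np.asarray(nums, dtype=float)
    return float(np.mean(np.abs(arr - arr.mean())))


# Example: mean_absolute_deviation_np([4, 1, 3, 2]) == mean_absolute_deviation([4, 1, 3, 2]) == 1.0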
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """
    Return the area of the grid whose rectangle count is closest to target.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """
    Return the number of hollow square laminae that can be formed using up to
    `limit` square tiles.
    """
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
def add(first: int, second: int) -> int:
    """
    Add two integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(-7, 2)
    -5
    """
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
"""METEOR metric."""
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:
    >>> meteor = datasets.load_metric('meteor')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Dict = LDMTextToImagePipeline
_SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
    required_optional_params = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ) -> Optional[int]:
'''simple docstring'''
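        # Deliberately tiny UNet / VAE / CLIP components so the pipeline test
        # runs in seconds on CPU; the manual seeds keep weight init deterministic.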
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vqvae""": vae,
"""bert""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> Any:
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_inference_text2img( self ) -> Optional[Any]:
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests( unittest.TestCase ):
    def tearDown( self ) -> List[str]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float16 , seed=0 ) -> int:
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_ldm_default_ddim( self ) -> str:
        '''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] )
        max_diff = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1E-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests( unittest.TestCase ):
    def tearDown( self ) -> Optional[Any]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float16 , seed=0 ) -> List[str]:
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_ldm_default_ddim( self ) -> Optional[Any]:
        '''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images[0]
        expected_image = load_numpy(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" )
        max_diff = np.abs(expected_image - image ).max()
        assert max_diff < 1E-3
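# Usage sketch (not part of the test suite; assumes a CUDA device and the same
# public checkpoint exercised above — prompt and filename are illustrative):
# pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to("cuda")
# image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
# image.save("squirrel.png")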
| 16 | 1 |
'''simple docstring'''
def remove_digit( num : int ):
    '''Return the largest number obtainable by removing exactly one digit.'''
    if not isinstance(num , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
    return max(
        int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 85 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    '''simple docstring'''
    lock1 = FileLock(str(tmpdir / "foo.lock" ) )
    lock2 = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    '''simple docstring'''
    filename = "a" * 1_0_0_0 + ".lock"
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith(".lock" )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 2_5_5
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
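# Note: the long-filename test relies on FileLock shortening over-long lock
# names (while keeping the ".lock" suffix) so the resulting file name stays
# within the usual 255-byte filesystem limit.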
| 85 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_a = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 100 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig( PretrainedConfig ):
    model_type = 'xlm-roberta-xl'
    def __init__( self , vocab_size=25_08_80 , hidden_size=25_60 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_02_40 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_14 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> Tuple:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig( OnnxConfig ):
    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
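# Usage sketch (illustrative values only):
# config = XLMRobertaXLConfig(num_hidden_layers=4)
# onnx_config = XLMRobertaXLOnnxConfig(config)
# print(onnx_config.inputs)  # dynamic batch / sequence axes for ONNX export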
| 100 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
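# The tester below builds tiny configs and random inputs; the test classes
# then exercise caching, gradient checkpointing, the classification heads and
# text generation against them.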
class BioGptModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Dict:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> Optional[Any]:
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Union[str, Any]:
        model = BioGptForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args ) -> str:
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(input_ids , attention_mask=attn_mask ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,) , half_seq_length ).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device )] , dim=1 , )
        # get two different outputs
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask )['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values , attention_mask=attn_mask )['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_biogpt_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args ) -> Tuple:
        model = BioGptModel(config=config ).to(torch_device ).eval()
        attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )['last_hidden_state']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            'last_hidden_state'
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_forward_and_backwards( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args , gradient_checkpointing=False ) -> List[str]:
        model = BioGptForCausalLM(config )
        model.to(torch_device )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization( self , config , *args ) -> Tuple:
        model = BioGptModel(config )
        # GPT-2-style scaled init: residual projections use std / sqrt(2 * n_layers)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
    def create_and_check_biogpt_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args ) -> Tuple:
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp( self ) -> Dict:
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
    def test_config( self ) -> Optional[int]:
        self.config_tester.run_common_tests()
    def test_biogpt_model( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_biogpt_model_various_embeddings( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_biogpt_model_att_mask_past( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
    def test_biogpt_gradient_checkpointing( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True )
    def test_biogpt_model_past_with_large_inputs( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
    def test_biogpt_weight_initialization( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
    def test_biogpt_token_classification_model( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
    @slow
    def test_batch_generation( self ) -> Union[str, Any]:
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        tokenizer.padding_side = 'left'
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        inputs = tokenizer(sentences , return_tensors='pt' , padding=True )
        input_ids = inputs['input_ids'].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids , attention_mask=inputs['attention_mask'].to(torch_device ) , )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
@slow
    def test_model_from_pretrained( self ) -> List[Any]:
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_biogpt_sequence_classification_model( self ) -> Union[str, Any]:
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label( self ) -> Any:
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_lm_head_model( self ) -> Tuple:
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]] )
        output = model(input_ids )[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_biogpt_generation( self ) -> Optional[int]:
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer('COVID-19 is' , return_tensors='pt' ).to(torch_device )
        output_ids = model.generate(
            **tokenized , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        expected_output_str = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
        self.assertEqual(output_str , expected_output_str )
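    # Usage sketch mirroring the slow test above (requires downloading the
    # microsoft/biogpt checkpoint; prompt and lengths are illustrative):
    # tok = BioGptTokenizer.from_pretrained('microsoft/biogpt')
    # lm = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
    # ids = tok('COVID-19 is', return_tensors='pt')
    # print(tok.decode(lm.generate(**ids, max_length=50, num_beams=5)[0], skip_special_tokens=True))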
| 62 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_5_5 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
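    # `pad` grows the image with symmetric padding so height and width become
    # multiples of `size`; note the formula always pads, even when a dimension
    # is already an exact multiple.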
    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        """simple docstring"""
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=data_format )
    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 121 | 0 |
"""simple docstring"""
def simplify(current_set: list[list] ) -> list[list]:
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list] ) -> list:
    if len(equations ) == 0:
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
    for row in equations:
        if any(not isinstance(column, (int, float) ) for column in row ):
            raise ValueError("""solve_simultaneous() requires lists of integers""" )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
        data_set.insert(0, full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item, 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 353 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env( env_keys, default ) -> List[str]:
    for e in env_keys:
        val = int(os.environ.get(e, -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key, default=False ) -> List[str]:
    value = os.environ.get(key, str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]="no" ) ->int:
A__ : str = os.environ.get(UpperCAmelCase__, str(UpperCAmelCase__ ) )
return value
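# Usage sketch (environment variable names are illustrative):
# world_size = get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)
# debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
# mode = parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no")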
| 296 | 0 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
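# The pipeline below upscales a large image by splitting it into overlapping
# tiles, running the x4 upscaler on each tile, and alpha-blending the results
# back together with linear-ramp transparency masks at the seams.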
def make_transparency_mask(size , overlap_pixels , remove_borders=[] ):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 2_55
    mask = np.pad(mask , mode='''linear_ramp''' , pad_width=overlap_pixels , end_values=0 )
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n , smallest , largest ):
    """simple docstring"""
    return max(smallest , min(n , largest ) )
def clamp_rect(rect , min_point , max_point ):
    """simple docstring"""
    return (
        clamp(rect[0] , min_point[0] , max_point[0] ),
        clamp(rect[1] , min_point[1] , max_point[1] ),
        clamp(rect[2] , min_point[0] , max_point[0] ),
        clamp(rect[3] , min_point[1] , max_point[1] ),
    )
def add_overlap_rect(rect , overlap , image_size ):
    """simple docstring"""
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
    return rect
def squeeze_tile(tile , original_image , original_slice , slice_x ):
    """simple docstring"""
    result = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile , (original_slice, 0) )
    return result
def unsqueeze_tile(tile , original_image_slice ):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
    return tile
def next_divisible(n , d ):
    """simple docstring"""
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
    """simple docstring"""
    def __init__( self , vae , text_encoder , tokenizer , unet , low_res_scheduler , scheduler , max_noise_level = 350 , ) -> int:
        super().__init__(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , max_noise_level=max_noise_level , )
    def _process_tile( self , original_image_slice , x , y , tile_size , tile_border , image , final_image , **kwargs ) -> Tuple:
        torch.manual_seed(0 )
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
            min(image.size[0] , (x + 1) * tile_size ),
            min(image.size[1] , (y + 1) * tile_size ),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect , tile_border , image.size )
        tile = image.crop(crop_rect_with_overlap )
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0 , translated_slice_x )
        to_input = squeeze_tile(tile , image , original_image_slice , translated_slice_x )
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline , self ).__call__(image=to_input , **kwargs ).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
        upscaled_tile = unsqueeze_tile(upscaled_tile , original_image_slice )
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
        remove_borders = []
        if x == 0:
            remove_borders.append('''l''' )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append('''r''' )
        if y == 0:
            remove_borders.append('''t''' )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append('''b''' )
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=remove_borders ) , mode='''L''' , )
        final_image.paste(
            upscaled_tile , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , transparency_mask )
    @torch.no_grad()
    def __call__( self , prompt , image , num_inference_steps = 75 , guidance_scale = 9.0 , noise_level = 50 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , callback = None , callback_steps = 1 , tile_size = 128 , tile_border = 32 , original_image_slice = 32 , ) -> Optional[int]:
        final_image = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
        tcx = math.ceil(image.size[0] / tile_size )
        tcy = math.ceil(image.size[1] / tile_size )
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy ):
            for x in range(tcx ):
                self._process_tile(
                    original_image_slice , x , y , tile_size , tile_border , image , final_image , prompt=prompt , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , noise_level=noise_level , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , )
                current_count += 1
                if callback is not None:
                    callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
        return final_image
def main():
    """simple docstring"""
    # Run a demo
    model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id , revision='''fp16''' , torch_dtype=torch.float16 )
    pipe = pipe.to('''cuda''' )
    image = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
    def callback(obj ):
        print(f'''progress: {obj["progress"]:.4f}''' )
        obj["image"].save('''diffusers_library_progress.jpg''' )
    final_image = pipe(image=image , prompt='''Black font, white background, vector''' , noise_level=40 , callback=callback )
    final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
| 81 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_distilbert"""] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_distilbert"""] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_distilbert"""] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 174 | 0 |
'''simple docstring'''
import numpy as np
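# A* search on a small grid world: each cell tracks g (cost so far),
# h (squared Euclidean distance to the goal) and f = g + h.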
class Cell:
    def __init__( self) -> Optional[Any]:
        '''simple docstring'''
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__( self , cell) -> Dict:
        '''simple docstring'''
        return self.position == cell.position
    def showcell( self) -> Optional[int]:
        '''simple docstring'''
        print(self.position)
class Gridworld:
    def __init__( self , world_size=(5, 5)) -> Optional[int]:
        '''simple docstring'''
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show( self) -> Tuple:
        '''simple docstring'''
        print(self.w)
    def get_neigbours( self , cell) -> Any:
        '''simple docstring'''
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar( world , start , goal ):
    '''simple docstring'''
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 367 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Squad ( datasets.Metric ):
    def _info( self) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string'), 'prediction_text': datasets.Value('string')},
'references': {
'id': datasets.Value('string'),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string'),
'answer_start': datasets.Value('int32'),
}),
},
}) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
    def _compute( self , predictions , references) -> int:
        '''simple docstring'''
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict)
        return score
| 242 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class XLMProphetNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ) -> List[Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ) -> Optional[int]:
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''[PAD]''' )
        self.assertEqual(vocab_keys[1] , '''[CLS]''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(vocab_keys ) , 1_012 )
    def test_vocab_size(self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
    def test_full_tokenizer(self ) -> Tuple:
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self ) -> Tuple:
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
    def test_tokenization_base_easy_symbols(self ) -> Optional[Any]:
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration(self ) -> Optional[int]:
# fmt: off
__UpperCAmelCase = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 333 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
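# Sanity checks for `Accelerator.no_sync`: gradients of the DDP-wrapped model
# should only match the reference model's gradients on synchronizing steps.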
def check_model_parameters( model_a , model_b , did_step , iteration ) -> Dict:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model( model , input , target , accelerator , do_backward=True ) -> Dict:
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator , sched=False ) -> List[Any]:
    '''simple docstring'''
    set_seed(4_2 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=8_0 )
    dataloader = DataLoader(dset , batch_size=1_6 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)

    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
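
# --- Illustrative sketch (not part of the original test; names are hypothetical) ---
# The pattern under test above: `no_sync` skips DDP's gradient all-reduce for the
# wrapped steps, so gradients accumulate locally and are only synchronized on the
# step taken outside the context manager. A hedged sketch of typical usage:
def _example_no_sync_usage(accelerator, ddp_model, batches, optimizer):
    for step, (inputs, targets) in enumerate(batches):
        if (step + 1) % 2 != 0:
            with accelerator.no_sync(ddp_model):  # local accumulation, no all-reduce
                accelerator.backward(F.mse_loss(ddp_model(inputs), targets))
        else:
            accelerator.backward(F.mse_loss(ddp_model(inputs), targets))  # grads sync here
            optimizer.step()
            optimizer.zero_grad()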
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)

    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
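
# --- Illustrative sketch (not part of the original test; names are hypothetical) ---
# The same behaviour without manual bookkeeping: `accelerator.accumulate(model)`
# tracks the step count itself (here `gradient_accumulation_steps=2`, as above) and
# only synchronizes gradients and applies optimizer steps on accumulation
# boundaries and on the final batch of the dataloader.
def _example_accumulate_loop(accelerator, model, dataloader, optimizer):
    for inputs, targets in dataloader:
        with accelerator.accumulate(model):
            loss = F.mse_loss(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()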
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
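
# --- Illustrative sketch (not part of the original test; names are hypothetical) ---
# What the assertions above rely on: dataloaders wrapped by `accelerator.prepare`
# register themselves with the shared `GradientState` while being iterated, and
# `end_of_dataloader` flips to True on the final batch. `accumulate` uses this flag
# to force a gradient sync even when the accumulation step count is mid-cycle.
def _example_end_of_dataloader(accelerator, dataloader):
    for batch in dataloader:
        if accelerator.gradient_state.end_of_dataloader:
            print("last batch: `accumulate` will sync regardless of the step count")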
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 333 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
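
# --- Illustrative sanity check (not part of the original module) ---
# A quick numeric example of the two score functions above: softmax normalizes
# logits into a distribution over mutually exclusive labels, while sigmoid scores
# each label independently (the multi-label case). Hypothetical helper, never
# called at import time.
def _example_score_functions():
    logits = np.array([2.0, 0.5, -1.0])
    print(softmax(logits).sum())  # 1.0: a proper probability distribution
    print(sigmoid(logits))  # each entry in (0, 1), no normalization across labels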
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any `ModelForSequenceClassification`."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
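
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how this pipeline is usually driven through the `pipeline`
# factory; the model name is a common public checkpoint chosen for illustration,
# not something this file implies.
def _example_text_classification_usage():
    from transformers import pipeline

    classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
    print(classifier("This movie was great!"))  # [{'label': 'POSITIVE', 'score': ...}]
    print(classifier("Mixed feelings about it.", top_k=None))  # scores for every label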
| 257 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite in feature-extractor-specific tests
    feature_extraction_class = None
    feat_extract_tester = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        # Note: the obfuscated source collapsed several distinct locals into one name;
        # they are restored here as input_1..input_9 so each assertion targets the
        # padding call it checks.
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
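
# --- Illustrative usage sketch (not part of the original test file) ---
# A hedged example of the padding API exercised above, using Wav2Vec2 as one
# concrete (but arbitrary) `SequenceFeatureExtractor`; any subclass behaves the same.
def _example_pad_usage():
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    ragged = [[0.1] * 400, [0.2] * 800]  # two "waveforms" of different lengths
    batch = extractor.pad(
        {"input_values": ragged}, padding="longest", return_tensors="np", return_attention_mask=True
    )
    print(batch["input_values"].shape)  # (2, 800): shorter input padded with padding_value
    print(batch["attention_mask"].sum(-1))  # [400, 800]: real lengths recovered from the mask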
| 257 | 1 |
"""BioGPT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
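
# --- Illustrative usage sketch (not part of the original module) ---
# Configs are plain containers: instantiate with keyword overrides and round-trip
# them through JSON. The directory path below is a hypothetical example.
def _example_biogpt_config_usage():
    config = BioGptConfig(num_hidden_layers=2, hidden_size=128)  # tiny variant for testing
    print(config.vocab_size)  # 42384: defaults are kept unless overridden
    config.save_pretrained("./tiny-biogpt")  # writes config.json
    reloaded = BioGptConfig.from_pretrained("./tiny-biogpt")
    assert reloaded.num_hidden_layers == 2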
| 4 |
"""BLEU metric."""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
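
# --- Illustrative sketch (not part of the original metric module) ---
# The two ingredients BLEU combines, written out by hand for unigrams only; the
# real `compute_bleu` does this for n-grams up to `max_order` and combines the
# precisions with a geometric mean before applying the brevity penalty.
def _example_bleu_ingredients(prediction, reference):
    import math
    from collections import Counter

    overlap = Counter(prediction) & Counter(reference)
    precision = sum(overlap.values()) / len(prediction)  # clipped unigram precision
    bp = 1.0 if len(prediction) > len(reference) else math.exp(1 - len(reference) / len(prediction))
    return bp * precision  # unigram-only BLEU-style score


# e.g. _example_bleu_ingredients(["the", "cat", "sat"], ["the", "cat", "sat", "down"])
# gives precision 1.0 scaled by brevity penalty exp(1 - 4/3).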
| 4 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
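
# --- Illustrative usage sketch (not part of the original module) ---
# The `inputs` mapping above tells the ONNX exporter which axes are dynamic; a
# hedged look at what it produces for the default (non multiple-choice) task:
def _example_onnx_inputs():
    config = Data2VecTextConfig()
    onnx_config = Data2VecTextOnnxConfig(config)
    print(onnx_config.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])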
| 368 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
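
# --- Illustrative sketch (not part of the original script) ---
# What the mapping above does to raw Megatron-DeepSpeed keys: checkpoint files are
# named like `layer_04-model_00-model_states.pt` (hypothetical example); files 0-2
# hold embeddings/norms, so transformer block numbering starts at file index 3.
def _example_layer_name_mapping():
    print(layer_name_mapping("self_attention.dense.weight", "layer_04-model_00-model_states.pt"))
    # -> "h.1.self_attention.dense.weight"  (4 - 3 = block 1)
    print(layer_name_mapping("word_embeddings.weight", "layer_01-model_00-model_states.pt"))
    # -> "word_embeddings.weight"  (first/last layers go through the rename map)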
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 175 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to pytorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
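
# --- Illustrative sketch (not part of the original script) ---
# The index written above follows the standard sharded-checkpoint layout understood
# by `from_pretrained`; roughly (key names and sizes here are hypothetical):
_EXAMPLE_INDEX = {
    "metadata": {"total_size": 12_345_678},
    "weight_map": {
        "encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    },
}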
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 268 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class UpperCamelCase_ (__A , __A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return True
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return KarrasVeSchedulerState.create()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState:
UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy()
UpperCAmelCase_ : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 )
UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape )
UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma
UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output
UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : str = sample_prev + sigma_prev * model_output
UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
raise NotImplementedError()
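# A self-contained NumPy sketch of the stochastic Karras et al. (2022) step the
# scheduler above implements: raise the noise level to sigma_hat, then take one
# Euler step toward sigma_prev. `denoise` stands in for the model's
# clean-sample prediction; all names here are illustrative assumptions.
import numpy as np

def karras_step_sketch(sample, sigma, sigma_prev, denoise, gamma=0.0, s_noise=1.007, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    eps = s_noise * rng.standard_normal(sample.shape)
    sigma_hat = sigma + gamma * sigma  # temporarily increased noise level
    sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5) * eps
    derivative = (sample_hat - denoise(sample_hat, sigma_hat)) / sigma_hat
    return sample_hat + (sigma_prev - sigma_hat) * derivative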
| 268 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( numbers ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("""numbers must be an iterable of integers""" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
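# Example: for [2, 3, -2, 4] the maximum product over contiguous subarrays is
# 6 (from [2, 3]); for [-2, 3, -4] it is 24, since the two negatives cancel.
if __name__ == "__main__":
    print(_lowerCAmelCase([2, 3, -2, 4]))  # 6
    print(_lowerCAmelCase([-2, 3, -4]))  # 24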
| 46 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["""image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
    ]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 1_00
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__lowerCAmelCase = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = image.cpu().permute(0,2,3,1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ),return_dict=__SCREAMING_SNAKE_CASE,)[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__lowerCAmelCase = """A red cartoon frog, 4k"""
__lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""",torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""",torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=5,negative_prompt="""""",).to_tuple()
__lowerCAmelCase = pipeline(
image=__SCREAMING_SNAKE_CASE,image_embeds=__SCREAMING_SNAKE_CASE,negative_image_embeds=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,height=7_68,width=7_68,strength=0.2,output_type="""np""",)
__lowerCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
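# For reference outside the test harness, the same prior -> decoder flow can be
# driven directly. The model ids mirror the slow test above; the helper itself
# (name, fp16 dtype, and the assumption of a CUDA device) is an illustrative
# sketch, not part of the test suite.
def _kandinsky_img2img_sketch(prompt, init_image, strength=0.2):
    prior = KandinskyVaaPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    decoder = KandinskyVaaImgaImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")
    image_embeds, negative_embeds = prior(prompt, negative_prompt="").to_tuple()
    return decoder(
        image=init_image,
        image_embeds=image_embeds,
        negative_image_embeds=negative_embeds,
        strength=strength,
        output_type="np",
    ).images[0]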
| 46 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( density , bulk_modulus ) -> float:
    if density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
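    # Worked example: water has density ~ 998 kg/m^3 and bulk modulus
    # ~ 2.15e9 Pa, so the formula gives sqrt(2.15e9 / 998) ~ 1468 m/s, close to
    # the accepted speed of sound in water (the constants are textbook
    # approximations).
    print(SCREAMING_SNAKE_CASE__(998, 2.15e9))  # ~1467.7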
| 177 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
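# A stripped-down illustration of the lazy-import pattern used above: attribute
# access on the module object triggers the real submodule import on first use.
# This is a sketch of the idea, not the actual transformers._LazyModule.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attribute names]} into {attribute: submodule}
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)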
| 177 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_plbart'''] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_plbart'''] = [
        '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PLBartForCausalLM''',
        '''PLBartForConditionalGeneration''',
        '''PLBartForSequenceClassification''',
        '''PLBartModel''',
        '''PLBartPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 16 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_torch
class _A ( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        object_detector = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
        examples = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
    def run_pipeline_test( self , object_detector , examples ):
        '''simple docstring'''
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
{
"""score""": ANY(__UpperCAmelCase ),
"""label""": ANY(__UpperCAmelCase ),
"""box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )},
}
                for i in range(n )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_small_model_tf( self ):
'''simple docstring'''
pass
@require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        object_detector = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
        outputs = object_detector(
            """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt( self ):
        '''simple docstring'''
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf( self ):
'''simple docstring'''
pass
@require_torch
@slow
    def test_threshold( self ):
        '''simple docstring'''
        threshold = 0.2
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=threshold , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
    def test_top_k( self ):
        '''simple docstring'''
        top_k = 2
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=top_k , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
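# Outside unittest, the same pipeline can be invoked directly; a minimal sketch
# (the helper name and the threshold value are illustrative choices):
def _zero_shot_detection_example():
    detector = pipeline("zero-shot-object-detection")
    return detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.3,
    )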
| 16 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
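# Usage note (an assumption about the shared helper, which is defined outside
# this file): running e.g. `pytest --make-reports=run_1 tests/` makes the
# terminal-summary hook above write per-run report files under `reports/run_1/`.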
| 181 |
'''simple docstring'''
def a__ ( a ) -> int:
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(a , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(a ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
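    # Example: 25 is 0b11001, so the count of set bits is 3.
    print(a__(25))  # 3
# An equivalent loop-based variant (Brian Kernighan's trick: n & (n - 1)
# clears the lowest set bit on each iteration), included as a sketch:
def _popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count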
| 181 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = KandinskyInpaintPipeline
    params = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    batch_params = [
        '''prompt''',
        '''negative_prompt''',
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
        '''mask_image''',
    ]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
return 32
@property
def _lowerCAmelCase ( self ) -> List[str]:
return 32
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
return self.time_input_dim
@property
def _lowerCAmelCase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self ) -> int:
return 1_00
@property
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
_lowerCAmelCase =MultilingualCLIP(__UpperCAmelCase )
_lowerCAmelCase =text_encoder.eval()
return text_encoder
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_lowerCAmelCase ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase =UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase ( self ) -> int:
torch.manual_seed(0 )
_lowerCAmelCase =VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =self.dummy_tokenizer
_lowerCAmelCase =self.dummy_unet
_lowerCAmelCase =self.dummy_movq
_lowerCAmelCase =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__UpperCAmelCase , )
_lowerCAmelCase ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Any:
_lowerCAmelCase =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_lowerCAmelCase =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__UpperCAmelCase )
# create init_image
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
_lowerCAmelCase =np.ones((64, 64) , dtype=np.floataa )
        _lowerCAmelCase[:32, :32] = 0
if str(__UpperCAmelCase ).startswith("""mps""" ):
_lowerCAmelCase =torch.manual_seed(__UpperCAmelCase )
else:
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_lowerCAmelCase ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu"""
_lowerCAmelCase =self.get_dummy_components()
_lowerCAmelCase =self.pipeline_class(**__UpperCAmelCase )
_lowerCAmelCase =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase =pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
_lowerCAmelCase =output.images
_lowerCAmelCase =pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase =np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase =np.ones((7_68, 7_68) , dtype=np.floataa )
        _lowerCAmelCase[:2_50, 2_50:-2_50] = 0
_lowerCAmelCase ="""a hat"""
_lowerCAmelCase =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
_lowerCAmelCase =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
_lowerCAmelCase =pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase =pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase =pipeline(
__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
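# A note on the rectangular mask built in the slow test above (the same
# construction, restated; which of the two regions the pipeline repaints is
# defined by the pipeline's mask convention and is not asserted here):
#     mask = np.ones((768, 768), dtype=np.float32)
#     mask[:250, 250:-250] = 0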
| 341 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
    '''simple docstring'''
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=5_12 , initializer_range=0.0_2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config( self ):
        return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
    def prepare_config_and_inputs( self ):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config( self ):
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pt_tf_model_equivalence = False
    def setUp( self ):
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
    def test_resize_token_embeddings( self ):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_lm_generate_xglm( self , verify_outputs=True ):
        model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        input_ids = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def test_xglm_sample( self ):
        tokenizer = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
        model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        tf.random.set_seed(0 )
        tokenized = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(""":/CPU:0""" ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation( self ):
        model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        tokenizer = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
        tokenizer.padding_side = """left"""
        # use different length sentences to test batching
        sentences = [
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
        inputs = tokenizer(sentences , return_tensors="""tf""" , padding=True )
        input_ids = inputs["""input_ids"""]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
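# Why the test sets padding_side to "left": with right padding, a decoder-only
# model would condition on pad tokens appended after the shorter prompt, so its
# continuation would diverge from the unpadded run. A tokenizer-free sketch of
# left padding (illustrative ids, 0 = pad):
def _left_pad(ids, length, pad_id=0):
    return [pad_id] * (length - len(ids)) + list(ids)
# _left_pad([5, 6], 4) -> [0, 0, 5, 6]: the real tokens stay adjacent to the
# newly generated ones.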
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=32 , n_head=4 , d_head=8 , d_inner=37 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=5_12 , type_vocab_size=3 , initializer_std=0.0_2 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelBaseModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForPreTraining(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelForQuestionAnswering(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
            """fill-mask""": TFFunnelForMaskedLM,
            """question-answering""": TFFunnelForQuestionAnswering,
            """text-classification""": TFFunnelForSequenceClassification,
            """token-classification""": TFFunnelForTokenClassification,
            """zero-shot""": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFFunnelModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class TFFunnelBaseModelTest( TFModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFFunnelModelTester(self , base=True )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_base_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
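# The layer bookkeeping in the tester's __init__ determines how many hidden
# states these tests expect; a standalone restatement of that arithmetic with
# the tester's defaults (an illustrative helper, not part of the test file):
def _expected_num_hidden_states(block_sizes=(1, 1, 2), num_decoder_layers=1, base=False):
    num_hidden_layers = sum(block_sizes) + (0 if base else num_decoder_layers)
    # the full model additionally exposes the input embeddings and the upsampled
    # encoder state summed with the first block's last hidden state
    return num_hidden_layers + (0 if base else 2)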
| 57 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    _backends = ['torch', 'torchsde']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["""torch""", """torchsde"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["""torch""", """torchsde"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["""torch""", """torchsde"""] )
| 173 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
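# Context for the flip_channel_order step implemented below: the pretrained
# MobileViT checkpoints expect BGR rather than RGB input, so the processor
# reverses the channel axis. A minimal standalone equivalent for channels-last
# arrays (illustrative):
def _flip_rgb_bgr(image: np.ndarray) -> np.ndarray:
    # reverse the last (channel) axis: RGB <-> BGR
    return image[..., ::-1]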
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = 1 / 2_5_5 , A = True , A = None , A = True , **A , ) -> None:
super().__init__(**A )
snake_case : Optional[Any] = size if size is not None else {"""shortest_edge""": 2_2_4}
snake_case : Optional[Any] = get_size_dict(A , default_to_square=A )
snake_case : Tuple = crop_size if crop_size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : Any = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : Tuple = size
snake_case : Optional[int] = resample
snake_case : Tuple = do_rescale
snake_case : Optional[int] = rescale_factor
snake_case : str = do_center_crop
snake_case : Union[str, Any] = crop_size
snake_case : Tuple = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
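

# --- Illustrative usage sketch (added; not part of the original file) ---
# A minimal, hedged example of running this processor end to end. The checkpoint
# name "apple/deeplabv3-mobilevit-small" is an assumption for illustration only.
#
#   from transformers import AutoImageProcessor, MobileViTForSemanticSegmentation
#   from PIL import Image
#
#   processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
#   model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
#   image = Image.open("cat.png")
#   inputs = processor(images=image, return_tensors="pt")   # resize -> crop -> rescale -> RGB->BGR flip
#   outputs = model(**inputs)
#   seg_maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])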
| 176 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
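

# --- Worked example (added; not part of the original script) ---
# A minimal sketch of the fused-QKV split used above: a (3*dim, dim) projection
# is cut into equal query / key / value thirds along the first axis.
#
#   import torch
#   dim = 4
#   qkv = torch.arange(3 * dim * dim).reshape(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v], dim=0), qkv)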
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = "ade" in model_name
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCamelCase : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
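
# --- Illustrative invocation (added; not part of the original script) ---
# A hedged sketch of how this converter is typically run; the script file name
# and the paths below are placeholders, not values shipped with the script:
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path /path/to/output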
| 176 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 63 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """Stores the configuration of a multimodal bitransformer on top of an existing text config."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 | 1 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case ="""\
Text data.
Second line of data."""
__snake_case ="""file"""
@pytest.fixture(scope='session')
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT, 'utf-8')
    with zstd.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), 'w') as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted', [True, False])
@pytest.mark.parametrize('default_cache_dir', [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', custom_extracted_dir)
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt')
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f'tmp://{tmpfs_file}')
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path('https://huggingface.co')


@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        http_get('https://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head('https://huggingface.co')


@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get('ftp://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head('ftp://huggingface.co')


@patch('datasets.config.HF_DATASETS_OFFLINE', True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.html'
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get('s3://huggingface.co', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head('s3://huggingface.co')
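

# --- Illustrative usage sketch (added; not part of the original test file) ---
# A minimal, hedged example of the API under test: `cached_path` resolves or
# downloads a resource and, with `extract_compressed_file=True`, also extracts
# it into the cache. The URL below is a placeholder.
#
#   from datasets.download.download_config import DownloadConfig
#   from datasets.utils.file_utils import cached_path
#
#   config = DownloadConfig(cache_dir='~/.cache/my_project', extract_compressed_file=True)
#   local_path = cached_path('https://example.com/data.txt.gz', download_config=config)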
| 55 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule='squaredcos_cap_v2', )

        # image noising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        # regular denoising components

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy')

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np', )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 55 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 168 |
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 99 | 0 |
'''simple docstring'''
import os
def solution():
    """
    Finds the maximum total in a triangle (as in Project Euler problems 18/67) by
    accumulating, row by row, the best path sum that can reach each entry.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
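
# --- Worked example (added; not part of the original solution) ---
# The recurrence above on a hypothetical 3-row triangle:
#
#       3             3
#      7 4    ->    10  7       (7+3, 4+3)
#     2 4 6        12 14 13     (2+10, 4+max(10, 7), 6+7)
#
# The max of the last row is 14: the best top-to-bottom path sum (3 -> 7 -> 4).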
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        assert tok('sam').input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text], padding=False, truncation=True)['input_ids']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == 'i am a small frog .'

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text)['input_ids']
        encoded_dot = tok(src_text_dot)['input_ids']
        assert encoded[-1] == encoded_dot[0]
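

# --- Illustrative usage sketch (added; not part of the original test file) ---
# A minimal, hedged example of the BPE behavior exercised above: "@@" marks a
# subword that continues into the next token, so with the toy vocab written in
# setUp, "apte" splits into "ap@@" + "te":
#
#   tokenizer = BlenderbotSmallTokenizer(vocab_file, merges_file)
#   tokenizer.tokenize('adapt act apte')   # -> ['adapt', 'act', 'ap@@', 'te']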
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 193 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 149 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark dataset builder."""

    features: Optional[datasets.Features] = None
def lowerCAmelCase__ ( _UpperCamelCase : "pyspark.sql.DataFrame" , _UpperCamelCase : List[int] , ) -> Dict:
"""simple docstring"""
import pyspark
def generate_fn():
snake_case = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
snake_case = df_with_partition_id.select('*' ).where(f"""part_id = {partition_id}""" ).drop('part_id' )
snake_case = partition_df.collect()
snake_case = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
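

# --- Illustrative note (added; not part of the original module) ---
# A minimal, hedged sketch of how the iterable above supports shuffling: each
# "shard" is one Spark partition, so a shuffle only permutes the partition ids.
#
#   import numpy as np
#   # it = SparkExamplesIterable(df)                             # df: a pyspark DataFrame
#   # shuffled = it.shuffle_data_sources(np.random.default_rng(42))
#   # shuffled.n_shards == it.n_shards                           # same data, new partition order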
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Capture the cache dir so that create_cache_and_write_probe does not reference
        # self, which would fail to pickle along with the SparkContext.
        cache_dir = self._cache_dir

        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, 'fs_test' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, 'a')
            return [probe_file]

        if self._spark.conf.get('spark.master', '').startswith('local'):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir')
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
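    # Worked example (illustrative): at ~1 KiB per row and 10,000,000 rows the approximate
    # total is ~10 GiB; with max_shard_size = 500 MiB, _repartition_df_if_needed above
    # repartitions the DataFrame into min(10_000_000, int(10 GiB / 500 MiB)) = 20
    # partitions, so each partition later becomes one roughly 500 MiB shard.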
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
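    # Illustrative note: each Spark task therefore reports exactly one row of
    # (task_id, total_num_examples, total_num_bytes, num_shards, shard_lengths),
    # which _prepare_split below aggregates before renaming the shards into the
    # -TTTTT-SSSSS-of-NNNNN pattern.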
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(self, split_generator) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
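# Usage sketch (illustrative, not part of the original file; assumes datasets>=2.12 and
# an active SparkSession): this builder is what backs `Dataset.from_spark`, e.g.
#
#   from datasets import Dataset
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = Dataset.from_spark(df)   # materializes the DataFrame through the builder above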
| 149 | 1 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] ,)
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len,
            max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
for batch in dataloader:
            assert isinstance(batch, dict)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason="""This test requires fairseq""" )
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1_000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len,
            max_target_length=max_len, n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] ,)
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train", max_source_length=4, max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 215 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ : List[str] = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
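# Usage sketch (illustrative): get_efficientnet_config("b0") returns an EfficientNetConfig
# with hidden_dim=1280 and image_size=224 (from CONFIG_MAP above), plus the 1000
# ImageNet-1k id2label entries downloaded from the hub.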
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # Keras names the classification head "predictions"; map it to the HF classifier.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
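# Worked example (illustrative): with the mapping above, a TF parameter name such as
# "block1a_project_conv/kernel:0" resolves to
# "efficientnet.encoder.blocks.0.projection.project_conv.weight".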
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None,
        pooling=None, classes=1000, classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 215 | 1 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the value at `start` down until the min-heap property is restored.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Bubble a decreased value up towards the root of the min-heap.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
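# Worked example (illustrative): for the weighted triangle 0-1 (w=1), 0-2 (w=3), 1-2 (w=1):
#
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
#   prisms_algorithm(adjacency_list)   # -> [(0, 1), (1, 2)], an MST of total weight 2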
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 197 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def get_dummy_components(self):
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def get_superresolution_dummy_components(self):
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 197 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
SCREAMING_SNAKE_CASE : Tuple = 7_7
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 245 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
UpperCAmelCase__ : Tuple = TypeVar("""T""")
class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph representation backed by a plain dictionary."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex. Also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as its first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
    def __repr__(self) -> str:
        return pformat(self.adj_list)
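# Usage sketch (illustrative): add_edge returns self, so edge insertions can be chained.
#
#   g = GraphAdjacencyList[int](directed=False)
#   g.add_edge(0, 1).add_edge(1, 2)
#   print(g)   # {0: [1], 1: [0, 2], 2: [1]}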
| 245 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A class replicating `transformers.BertConfig` with additional parameters for pruning/masking configuration."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
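# Usage sketch (illustrative): this config mirrors BertConfig plus the pruning fields
# used by movement-pruning-style masked linear layers, e.g.
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)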
| 113 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 113 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Any = _uppercase_uppercase_re.sub(r"\1_\2", a_ )
_UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(r"\1_\2", a_ )
return name.lower()
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : List[Any] = _single_underscore_re.split(a_ )
_UpperCAmelCase : Optional[int] = [_multiple_underscores_re.split(a_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(a_ ) if n != "" )
def __UpperCAmelCase ( a_: Optional[Any] ):
if os.path.basename(a_ ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(a_ )
def __UpperCAmelCase ( a_: Tuple, a_: Optional[Any] ):
if os.path.basename(a_ ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re, a_ ):
raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""" )
return f"""{filename_prefix_for_name(a_ )}-{split}"""
def __UpperCAmelCase ( a_: List[str], a_: str, a_: Any, a_: Dict=None ):
_UpperCAmelCase : Any = filename_prefix_for_split(a_, a_ )
if filetype_suffix:
prefix += f""".{filetype_suffix}"""
_UpperCAmelCase : List[Any] = os.path.join(a_, a_ )
return f"""{filepath}*"""
def __UpperCAmelCase ( a_: Dict, a_: str, a_: Union[str, Any], a_: Optional[Any]=None, a_: Tuple=None ):
_UpperCAmelCase : Dict = filename_prefix_for_split(a_, a_ )
_UpperCAmelCase : List[str] = os.path.join(a_, a_ )
if shard_lengths:
_UpperCAmelCase : Dict = len(a_ )
_UpperCAmelCase : Optional[Any] = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(a_ )]
if filetype_suffix:
_UpperCAmelCase : Any = [filename + f""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
_UpperCAmelCase : List[str] = prefix
if filetype_suffix:
filename += f""".{filetype_suffix}"""
return [filename]
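# Illustrative expectations for the helpers above, following the regex rules
# defined at the top of this module:
#   camelcase_to_snakecase("SomeDatasetName")              -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name")            -> "SomeDatasetName"
#   filename_prefix_for_split("SomeDatasetName", "train")  -> "some_dataset_name-train"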
| 145 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''ibert'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
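# Minimal usage sketch, assuming the standard PretrainedConfig API:
#   config = IBertConfig(quant_mode=True)
#   assert config.model_type == "ibert" and config.num_hidden_layers == 12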
| 145 | 1 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("""GET""" , """https://huggingface.co""" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("""GET""" , """https://huggingface.co""" )
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("""https://huggingface.co""" )
| 69 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor( MobileViTImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
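# Migration sketch (checkpoint name is illustrative):
#   from transformers import MobileViTImageProcessor
#   image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")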
| 69 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , language_codes="m2m100" , sp_model_kwargs: Optional[Dict[str, Any]] = None , num_madeup_words=8 , **kwargs , ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(F'{save_directory} should be a directory' )
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en" , tgt_texts = None , tgt_lang = "ro" , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ):
        '''simple docstring'''
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        '''simple docstring'''
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang ) -> str:
        '''simple docstring'''
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang ) -> int:
        '''simple docstring'''
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path: str ) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path , "r" ) as f:
        return json.load(f )
def save_json( data , path: str ) -> None:
    '''simple docstring'''
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
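# Usage sketch (checkpoint and language pair are illustrative, but the
# checkpoint name matches the vocab maps defined above):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")
#   forced_bos = tokenizer.get_lang_id("fr")  # decoder start token for generation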
| 68 |
def price_plus_tax( price: float , tax_rate: float ) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(1_00, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
| 142 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_chinese( self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
    @require_torch
    def test_prepare_batch( self ):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text , padding=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self ):
        self.assertTrue(_is_whitespace(' ' ) )
        self.assertTrue(_is_whitespace('\t' ) )
        self.assertTrue(_is_whitespace('\r' ) )
        self.assertTrue(_is_whitespace('\n' ) )
        self.assertTrue(_is_whitespace('\u00A0' ) )
        self.assertFalse(_is_whitespace('A' ) )
        self.assertFalse(_is_whitespace('-' ) )
    def test_is_control( self ):
        self.assertTrue(_is_control('\u0005' ) )
        self.assertFalse(_is_control('A' ) )
        self.assertFalse(_is_control(' ' ) )
        self.assertFalse(_is_control('\t' ) )
        self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation( self ):
        self.assertTrue(_is_punctuation('-' ) )
        self.assertTrue(_is_punctuation('$' ) )
        self.assertTrue(_is_punctuation('`' ) )
        self.assertTrue(_is_punctuation('.' ) )
        self.assertFalse(_is_punctuation('A' ) )
        self.assertFalse(_is_punctuation(' ' ) )
    @slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
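# These tests are collected via the `test_` prefix; a typical invocation is
# (path illustrative, slow/@require_torch cases need torch and network access):
#   pytest tests/models/prophetnet/test_tokenization_prophetnet.py -k full_tokenizer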
| 360 |
"""simple docstring"""
def euclidean_distance_sqr( point1 , point2 ):
    """simple docstring"""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort( array , column=0 ):
    """simple docstring"""
    return sorted(array , key=lambda point : point[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('inf' ) ):
    """simple docstring"""
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('inf' ) ):
    """simple docstring"""
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
    """simple docstring"""
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("""Distance:""", closest_pair_of_points(points, len(points)))
| 289 | 0 |
import heapq
def greedy_min_vertex_cover( graph: dict ) -> set:
    """
    Greedy APX-Algorithm for Minimum Vertex Cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 12 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_pad: bool = True , pad_size: int = 8 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image: np.ndarray , size: int , data_format: Optional[Union[str, ChannelDimension]] = None ):
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format )
    def preprocess( self , images: ImageInput , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_pad: Optional[bool] = None , pad_size: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
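# Usage sketch (array shape is illustrative):
#   import numpy as np
#   processor = Swin2SRImageProcessor()
#   batch = processor(np.zeros((3, 13, 17), dtype=np.float32), return_tensors="np")
#   # 13x17 is padded up to 16x24, the next multiples of pad_size (8)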
| 12 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , """set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , """set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )
    def test_pow_of_3_inference_steps( self ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample
    def test_inference_plms_no_past_residuals( self ):
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 198.1318 ) < 1e-2
        assert abs(result_mean.item() - 0.2580 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 67.3986 ) < 1e-2
        assert abs(result_mean.item() - 0.0878 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
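# Outside the test harness the scheduler is driven the same way `full_loop`
# drives it (sketch):
#   scheduler = PNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   # step_prk(...) over scheduler.prk_timesteps, then step_plms(...) over
#   # scheduler.plms_timesteps, feeding each `prev_sample` back in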
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
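# Both import paths below resolve to the same lazily-loaded class (sketch,
# assuming torch is installed):
#   from transformers import SpeechEncoderDecoderModel
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel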
| 164 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class CpmTokenizer( PreTrainedTokenizer ):
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''' )
        self.jieba = jieba
        self.translator = str.maketrans(''' \n''' , '''\u2582\u2583''' )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''\"''' ).replace('''\'\'''' , '''\"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token: str ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index: int ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        '''simple docstring'''
        text = super()._decode(*args , **kwargs )
        text = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
        return text
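# Usage sketch (checkpoint name matches the map defined above):
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   text = tokenizer.decode(tokenizer.encode("some text"))
#   # `_decode` maps the "\u2582"/"\u2583" placeholders back to space/newline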
| 34 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module ):
    '''simple docstring'''
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module ):
    '''simple docstring'''
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module ):
    '''simple docstring'''
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D(nn.Module ):
    '''simple docstring'''
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module ):
    '''simple docstring'''
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
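# Shape-flow note: down blocks return (hidden_states, output_states), where
# output_states collects one skip activation per layer (plus the downsampled
# output); up blocks pop the matching entries off res_hidden_states_tuple and
# concatenate them on the channel axis before each resnet.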
| 345 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCamelCase : Tuple = ["small", "medium", "large"]
UpperCamelCase : Optional[Any] = "lm_head.decoder.weight"
UpperCamelCase : List[str] = "lm_head.weight"
def A ( snake_case :str , snake_case :str ) -> Optional[int]:
__UpperCamelCase = torch.load(snake_case )
__UpperCamelCase = d.pop(snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
torch.save(snake_case , os.path.join(snake_case , snake_case ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
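# Usage note (script/file names are illustrative): run from a directory that
# contains {small,medium,large}_ft.pkl, e.g.
#   python convert_dialogpt_checkpoint.py --dialogpt_path .
# which writes ./DialoGPT-small, ./DialoGPT-medium, ./DialoGPT-large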
| 263 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = ["input_values", "attention_mask"]
def __init__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 1_6000 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = False , __UpperCAmelCase = 80 , __UpperCAmelCase = 16 , __UpperCAmelCase = 64 , __UpperCAmelCase = "hann_window" , __UpperCAmelCase = 1.0 , __UpperCAmelCase = 80 , __UpperCAmelCase = 7600 , __UpperCAmelCase = 1E-10 , __UpperCAmelCase = 2 , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = do_normalize
__UpperCamelCase = return_attention_mask
__UpperCamelCase = num_mel_bins
__UpperCamelCase = hop_length
__UpperCamelCase = win_length
__UpperCamelCase = win_function
__UpperCamelCase = frame_signal_scale
__UpperCamelCase = fmin
__UpperCamelCase = fmax
__UpperCamelCase = mel_floor
__UpperCamelCase = reduction_factor
__UpperCamelCase = win_length * sampling_rate // 1000
__UpperCamelCase = hop_length * sampling_rate // 1000
__UpperCamelCase = optimal_fft_length(self.sample_size )
__UpperCamelCase = (self.n_fft // 2) + 1
__UpperCamelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCAmelCase )
__UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , __UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , __UpperCAmelCase , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform):
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(waveform, dtype=np.float32) for waveform in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            input_values = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            input_values = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            input_values = input_values.astype(np.float32)
        padded_inputs["input_values"] = input_values

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self):
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
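
# A minimal, hedged usage sketch (not part of the original file). It assumes
# 16 kHz mono float waveforms; the random arrays are placeholders.
import numpy as np

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)  # 1 s of audio
target = np.random.randn(16000).astype(np.float32)
batch = extractor(audio=waveform, audio_target=target, sampling_rate=16000, return_tensors="np")
print(batch["input_values"].shape, batch["labels"].shape)  # waveform batch, mel-spectrogram labels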
| 263 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
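
# Hedged usage sketch (not from the original file). The checkpoint name comes
# from the pretrained map above; jieba must be installed for the custom
# pre-tokenizer to be restored on unpickling.
tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("今天天气非常好。"))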
| 35 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
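
# Hedged usage sketch (not part of the original file); the checkpoint name and
# text are illustrative, and jieba must be installed.
tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("清华大学是一所大学。")
print(tokenizer.decode(ids))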
| 351 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def a ( self : List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def a ( self : Tuple ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*SCREAMING_SNAKE_CASE__ )
def a ( self : List[Any] ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : List[str] ) -> Union[str, Any]:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRQuestionEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRReader.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
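
# Hedged illustration (not in the original tests): DPR scores a passage against
# a question by the dot product of the two pooled embeddings. The checkpoint
# names mirror the integration test above; the sample texts are illustrative.
import tensorflow as tf
from transformers import (
    DPRContextEncoderTokenizer,
    DPRQuestionEncoderTokenizer,
    TFDPRContextEncoder,
    TFDPRQuestionEncoder,
)

q_tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
c_tok = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
q_enc = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
c_enc = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

q_emb = q_enc(**q_tok("who wrote hamlet?", return_tensors="tf")).pooler_output
c_emb = c_enc(**c_tok("Hamlet is a tragedy written by William Shakespeare.", return_tensors="tf")).pooler_output
score = tf.reduce_sum(q_emb * c_emb, axis=-1)  # higher means more relevant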
| 221 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
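
# Hedged usage sketch (not in the original file): wiring the subcommand into a
# parser, mirroring what transformers-cli does. The file names are illustrative
# and must exist for the command to run.
from argparse import ArgumentParser

parser = ArgumentParser("transformers-cli")
RunCommand.register_subcommand(parser.add_subparsers(help="commands"))
args = parser.parse_args(["run", "--task", "sentiment-analysis", "--input", "reviews.csv", "--column", "text"])
command = args.func(args)
command.run()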
| 25 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
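
# Hedged usage sketch (not from the original file); the checkpoint and image
# URL are illustrative.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor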
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan Distance."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()

    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
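
# Hedged follow-up note (not part of the original file): greedy best-first
# sorts the frontier purely by the heuristic (f = h), unlike A* where
# f = g + h, so the path it returns is not guaranteed to be shortest. A quick
# self-contained check on a fresh empty grid:
if __name__ == "__main__":
    grid = [[0] * 3 for _ in range(3)]  # reset the module-level grid the search reads
    print(GreedyBestFirst((0, 0), (2, 2)).search())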
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303 | 0 |
"""simple docstring"""
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
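
    # Hedged usage examples (not from the original file); the expected values
    # follow directly from the conversion charts above.
    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(100, "km/h", "knot"))  # 53.996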
| 44 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
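
# Hedged usage sketch (not from the original file): split 12 layers over two
# devices and validate the resulting map.
device_map = get_device_map(n_layers=12, devices=[0, 1])
print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
assert_device_map(device_map, num_blocks=12)  # raises ValueError on bad maps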
| 44 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def a ( self : Tuple ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase = prepare_pegasus_inputs_dict(__a , __a , __a )
return config, inputs_dict
def a ( self : Union[str, Any] , _lowercase : Dict , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = 20
__UpperCAmelCase = model_class_name(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a )
__UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
__UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , )
__UpperCAmelCase = model.decode(__a , __a )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def a ( self : Any , _lowercase : Any , _lowercase : List[str] , _lowercase : Union[str, Any] ):
__UpperCAmelCase = 20
__UpperCAmelCase = model_class_name(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
__UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , )
__UpperCAmelCase = model.decode(__a , __a , decoder_attention_mask=__a )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def a ( self : List[Any] ):
__UpperCAmelCase = FlaxPegasusModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a )
def a ( self : List[Any] ):
self.config_tester.run_common_tests()
def a ( self : Any ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a , __a , __a )
def a ( self : Tuple ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = self._prepare_for_class(__a , __a )
__UpperCAmelCase = model_class(__a )
@jax.jit
def encode_jitted(_lowercase : Union[str, Any] , _lowercase : Optional[Any]=None , **_lowercase : Any ):
return model.encode(input_ids=__a , attention_mask=__a )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = encode_jitted(**__a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = encode_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = model_class(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__UpperCAmelCase = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowercase : List[str] , _lowercase : Dict , _lowercase : int ):
return model.decode(
decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = decode_jitted(**__a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = decode_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 350 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Accepts dicts, lists of dicts, generators and datasets as-is
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 86 | 0 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>
    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>'''

    html_string_2 = '''
    <!DOCTYPE html>
    <html>
    <body>
    <h1>My First Heading</h1>
    <p>My first paragraph.</p>
    </body>
    </html>
    '''

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 210 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 210 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass
        with `x` as input and tracing the parametrized operations on both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
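# Example invocation (a sketch; the script file name and output path are assumptions):
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet50-converted --push_to_hub False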
| 98 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
def snake_case__ ( self : Optional[Any] ):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
# Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
# Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 98 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create and run the estimator
        estimator = self.create_estimator()
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 7 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
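
# A small worked example (added for illustration; the food names and numbers are made up):
# build a menu and take items greedily by value density until the cost budget is hit.
def demo_greedy() -> None:
    menu = build_menu(["burger", "salad", "fries"], [80, 50, 30], [40, 10, 25])
    chosen, total_value = greedy(menu, max_cost=50, key_func=Things.value_weight)
    # salad (density 5.0) is taken first, the burger (density 2.0) still fits exactly,
    # and the fries no longer do, so total_value ends up at 130.0
    print(chosen, total_value)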
| 7 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
| 24 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
| 24 | 1 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank heuristic; return True if successful."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
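
# A short usage sketch (illustrative values): three singleton sets, merged pairwise.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])
    ds.merge(0, 1)  # sets {0, 1} and {2}; largest set now has 2 items
    ds.merge(1, 2)  # everything joined; largest set now has 3 items
    print(ds.max_set)  # 3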
| 280 |
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
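
# Sanity check (the primes below 30 are well known):
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]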
| 280 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Smoke test for the CircularLinkedList."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 55 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
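
# Usage sketch (the path is illustrative): a FileLock guards a critical section, and a
# second acquire with a small timeout raises Timeout while the first one is held.
#
# lock = FileLock("/tmp/demo.lock")
# with lock.acquire(timeout=1):
#     ...  # exclusive access to the shared resource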
| 65 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    """
    Compute the Schur complement S = C - B^T A^-1 B of the block
    matrix [[A, B], [B^T, C]].
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
class UpperCamelCase ( unittest.TestCase ):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
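
# Worked example (added for illustration; hand-checked): with A = diag(1, 2),
# B = [[1], [0]] and C = [[3]], the Schur complement is S = C - B^T A^-1 B = 3 - 1 = 2,
# and the block-determinant identity det([[A, B], [B^T, C]]) = det(A) * det(S)
# gives 2 * 2 = 4, which matches the determinant of the assembled 3x3 block matrix.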
| 294 | 0 |
def sylvester(number: int) -> int:
    """Return the n-th number in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 191 |
def solution(limit: int = 50000000) -> int:
    """
    Count the numbers below `limit` expressible as the sum of a prime square,
    a prime cube and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f"{solution() = }")
| 191 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 319 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant architecture hyperparameters from the original Keras EfficientNet release.
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    # Attach the ImageNet-1k label mapping hosted on the hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
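    # Per-block parameters: expansion, depthwise convolution, squeeze-excite and projection.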
for b in block_names:
A: int = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # The classification head lives outside the encoder.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are laid out (H, W, in, out); PyTorch expects (out, in, H, W).
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            # Depthwise kernels are (H, W, in_channels, depth_multiplier) in TF.
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            # Dense kernels only need a transpose.
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original Keras model with its ImageNet classifier.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        # Extend the dummy input with the extra tensors a given block type may require.
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 273 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = direct_transformers_import(PATH_TO_TRANSFORMERS)
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 273 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """
    MGP-STR tokenizer: character-level tokenization backed by a plain vocab.json.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # Split the input into individual characters.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 74 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        # Try to resolve a TPU cluster; fall back to None when none is reachable.
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 74 | 1 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string(target) can be constructed from
    the given list of substrings(word_bank)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 366 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 298 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , '''num_attention_heads''' ) )
class LevitModelTester:
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=1_3 , __UpperCamelCase : int=6_4 , __UpperCamelCase : Any=3 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : str=1 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=[1_2_8, 2_5_6, 3_8_4] , __UpperCamelCase : Dict=[4, 6, 8] , __UpperCamelCase : List[str]=[2, 3, 4] , __UpperCamelCase : Any=[1_6, 1_6, 1_6] , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : Optional[Any]=[2, 2, 2] , __UpperCamelCase : int=[2, 2, 2] , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : str=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=2 , )->Union[str, Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = kernel_size
_UpperCAmelCase = stride
_UpperCAmelCase = padding
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = depths
_UpperCAmelCase = key_dim
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = patch_size
_UpperCAmelCase = attention_ratio
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = initializer_range
_UpperCAmelCase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
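        # Each "Subsample" op is a stride-2 attention block that shrinks the token grid between stages.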
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = initializer_range
def lowercase__ ( self : Union[str, Any] )->List[Any]:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[Any] )->Union[str, Any]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[Any] )->Union[str, Any]:
_UpperCAmelCase = LevitModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
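        # The patch embedding applies four stride-2 convolutions, halving the spatial resolution each time.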
for _ in range(4 ):
_UpperCAmelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
_UpperCAmelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LevitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any )->Tuple:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : List[str] )->Union[str, Any]:
_UpperCAmelCase = LevitModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : List[str] )->Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Union[str, Any] )->Tuple:
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def lowercase__ ( self : List[str] )->Any:
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def lowercase__ ( self : List[Any] )->Optional[int]:
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def lowercase__ ( self : Tuple )->Optional[int]:
pass
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[int]:
def check_hidden_states_output(__UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = outputs.hidden_states
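            # Expect one hidden state per transformer stage plus the patch-embedding output.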
_UpperCAmelCase = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
_UpperCAmelCase = (self.model_tester.image_size, self.model_tester.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
for _ in range(4 ):
_UpperCAmelCase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
_UpperCAmelCase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : str )->Tuple:
pass
def lowercase__ ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any]=False )->List[Any]:
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
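            # The teacher-distilled variant is inference-only, so it takes no labels.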
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self : Optional[Any] )->Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def lowercase__ ( self : Any )->Union[str, Any]:
if not self.model_tester.is_training:
return
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def lowercase__ ( self : str )->int:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_UpperCAmelCase = False
_UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def lowercase__ ( self : Optional[Any] )->str:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
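        # Exercise each classification problem type the loss computation dispatches on.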
_UpperCAmelCase = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
_UpperCAmelCase = problem_type['''title''']
_UpperCAmelCase = problem_type['''num_labels''']
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if problem_type["num_labels"] > 1:
_UpperCAmelCase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
_UpperCAmelCase = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list:
_UpperCAmelCase = model(**__UpperCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def lowercase__ ( self : Union[str, Any] )->Optional[int]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LevitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase__ ( self : Tuple )->Dict:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase__ ( self : Tuple )->List[Any]:
_UpperCAmelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 260 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """
        Calculate y[n]
        """
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    # Plotting bounds from the FFT magnitudes, ignoring the DC bin and the mirrored half.
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    # The filter's response to a unit impulse is its impulse response.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
| 260 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """
    Return the largest product a*b*c of a Pythagorean triplet with a+b+c == n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
| 153 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
_a = self.get_tokenizer()
_a = BarkProcessor(tokenizer=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_a = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
    def test_save_load_pretrained_additional_features(self):
_a = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
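        # Reloading with overridden special tokens must forward those kwargs to the tokenizer.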
_a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings(self):
_a = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        # A Bark voice preset holds three prompt arrays: semantic, coarse and fine codebooks.
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
# test providing already loaded voice_preset
_a = processor(text=self.input_string , voice_preset=__UpperCAmelCase )
_a = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_a = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(__UpperCAmelCase , **__UpperCAmelCase )
_a = processor(text=self.input_string , voice_preset=__UpperCAmelCase )
_a = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_a = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer(self):
_a = self.get_tokenizer()
_a = BarkProcessor(tokenizer=__UpperCAmelCase )
_a = processor(text=self.input_string )
_a = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , )
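        # The processor output must match calling the tokenizer directly with the same settings.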
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 153 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 64 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # Build an undirected adjacency list: each edge is stored in both directions.
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 19 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 364 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    # Write straight to stdout and flush so cursor moves take effect immediately.
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    # Wrap the content in an ANSI color escape sequence.
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 102 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. They should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        # Recover the input batch size so generated ids can be regrouped per input sequence.
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
def lowercase_ (self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int]=ReturnType.TEXT , __UpperCAmelCase : Dict=False ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCAmelCase__ = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
UpperCAmelCase__ = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , )
}
records.append(__UpperCAmelCase )
return records
@add_end_docstrings(UpperCAmelCase_ )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Union[str, Any] = 'summary'
def __call__(self : Dict , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ) -> Any:
"""simple docstring"""
return super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCAmelCase_ )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : str = 'translation'
def lowercase_ (self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def lowercase_ (self : int , *__UpperCAmelCase : List[Any] , __UpperCAmelCase : Any=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Optional[int]=None ) -> List[Any]:
"""simple docstring"""
if getattr(self.tokenizer , "_build_translation_inputs" , __UpperCAmelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCAmelCase , return_tensors=self.framework , truncation=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase )
else:
return super()._parse_and_tokenize(*__UpperCAmelCase , truncation=__UpperCAmelCase )
def lowercase_ (self : int , __UpperCAmelCase : Dict=None , __UpperCAmelCase : List[Any]=None , **__UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = super()._sanitize_parameters(**__UpperCAmelCase )
if src_lang is not None:
UpperCAmelCase__ = src_lang
if tgt_lang is not None:
UpperCAmelCase__ = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; passing src_lang/tgt_lang directly is preferred.
UpperCAmelCase__ = kwargs.get("task" , self.task )
UpperCAmelCase__ = task.split("_" )
if task and len(__UpperCAmelCase ) == 4:
# translation, XX, to YY
UpperCAmelCase__ = items[1]
UpperCAmelCase__ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__(self : Optional[int] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
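
# A minimal, self-contained sketch of the `translation_XX_to_YY` task-name
# parsing done in `_sanitize_parameters` above (the task name is illustrative):
_task = "translation_en_to_fr"
_items = _task.split("_")
assert len(_items) == 4 and (_items[1], _items[3]) == ("en", "fr")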
| 65 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create the universe of discourse using np.linspace().
UpperCamelCase__ = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCamelCase__ = [0, 2_5, 5_0]
UpperCamelCase__ = [2_5, 5_0, 7_5]
UpperCamelCase__ = fuzz.membership.trimf(X, abca)
UpperCamelCase__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCamelCase__ = np.ones(7_5)
UpperCamelCase__ = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
UpperCamelCase__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCamelCase__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
UpperCamelCase__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCamelCase__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
UpperCamelCase__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCamelCase__ = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
UpperCamelCase__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, µA(x) - µB(x)]
UpperCamelCase__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
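
# Self-contained sketch of the two core operations above without skfuzzy:
# fuzzy union is the element-wise max and fuzzy intersection the element-wise min.
a = np.array([0.0, 0.4, 0.8])
b = np.array([0.5, 0.5, 0.5])
assert (np.fmax(a, b) == np.array([0.5, 0.5, 0.8])).all()  # union
assert (np.fmin(a, b) == np.array([0.0, 0.4, 0.5])).all()  # intersection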
| 65 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_( a__ ):
__UpperCamelCase = (UnCLIPScheduler,)
def lowerCamelCase__ ( self : List[str] , **UpperCamelCase_ : int ):
lowerCAmelCase : List[str] = {
'''num_train_timesteps''': 1_0_0_0,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**UpperCamelCase_ )
return config
def lowerCamelCase__ ( self : Optional[int] ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase_ , prev_timestep=UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config(variance_type='''fixed_small_log''' )
lowerCAmelCase : Union[str, Any] = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_994_987 ) ) < 1E-5
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config(variance_type='''learned_range''' )
lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase_ ) - -10.1_712_790 < 1E-5
assert scheduler._get_variance(4_8_7 , predicted_variance=UpperCamelCase_ ) - -5.7_998_052 < 1E-5
assert scheduler._get_variance(9_9_9 , predicted_variance=UpperCamelCase_ ) - -0.0_010_011 < 1E-5
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : Any = self.get_scheduler_config()
lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Tuple = scheduler.timesteps
lowerCAmelCase : int = self.dummy_model()
lowerCAmelCase : str = self.dummy_sample_deter
lowerCAmelCase : Any = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase_ ):
# 1. predict noise residual
lowerCAmelCase : Dict = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : str = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowerCAmelCase : Optional[Any] = pred_prev_sample
lowerCAmelCase : List[Any] = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : List[str] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(2_5 )
lowerCAmelCase : str = scheduler.timesteps
lowerCAmelCase : Optional[int] = self.dummy_model()
lowerCAmelCase : List[Any] = self.dummy_sample_deter
lowerCAmelCase : int = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase_ ):
# 1. predict noise residual
lowerCAmelCase : Any = model(UpperCamelCase_ , UpperCamelCase_ )
if i + 1 == timesteps.shape[0]:
lowerCAmelCase : Any = None
else:
lowerCAmelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : Optional[int] = scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , prev_timestep=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowerCAmelCase : int = pred_prev_sample
lowerCAmelCase : List[Any] = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : Tuple ):
pass
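
# Self-contained sketch of the reproducibility the loop tests above rely on:
# re-seeding the default generator reproduces the exact same noise draws.
_draw_a = torch.randn(2, generator=torch.manual_seed(0))
_draw_b = torch.randn(2, generator=torch.manual_seed(0))
assert torch.equal(_draw_a, _draw_b)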
| 370 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _snake_case ( _snake_case : Optional[int] ):
lowerCAmelCase : List[str] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase, lowerCAmelCase : str = emb.weight.shape
lowerCAmelCase : Optional[Any] = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
lowerCAmelCase : Tuple = emb.weight.data
return lin_layer
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Dict=None ):
lowerCAmelCase : Union[str, Any] = {}
for old_key in state_dict.keys():
lowerCAmelCase : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCAmelCase : str = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCAmelCase : Optional[Any] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCAmelCase : Any = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowerCAmelCase : Tuple = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowerCAmelCase : int = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCAmelCase : List[str] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCAmelCase : int = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCAmelCase : List[str] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCAmelCase : Tuple = state_dict[old_key]
return new_dict
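
# Self-contained sketch of the expert-key rewrite performed above (the layer
# and expert indices are illustrative):
_old_key = "layers.0.moe_layer.experts.0.fc1.weight"
_new_key = _old_key.replace("moe_layer.experts.0", "ffn.experts.expert_3")
assert _new_key == "layers.0.ffn.experts.expert_3.fc1.weight"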
def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : str = WEIGHTS_NAME ):
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : Tuple = 0
os.makedirs(_snake_case , exist_ok=_snake_case )
for expert in range(_snake_case ):
lowerCAmelCase : Any = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(_snake_case ):
lowerCAmelCase : List[str] = torch.load(_snake_case )['''model''']
remove_ignore_keys_(_snake_case )
lowerCAmelCase : Any = rename_fairseq_keys(_snake_case , _snake_case )
lowerCAmelCase : Any = os.path.join(
_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) )
torch.save(_snake_case , _snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_snake_case )[0]].dtype )
# Add the last block
lowerCAmelCase : List[str] = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{len(_snake_case )+1:05d}-of-???.bin''' ) )
lowerCAmelCase : str = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(_snake_case )
lowerCAmelCase : Union[str, Any] = rename_fairseq_keys(_snake_case , _snake_case )
lowerCAmelCase : Dict = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved in the same file)
if len(_snake_case ) == 1:
lowerCAmelCase : List[str] = os.path.join(_snake_case , _snake_case )
torch.save(_snake_case , _snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_snake_case , _snake_case )
# Otherwise, let's build the index
lowerCAmelCase : Dict = {}
for idx, shard in enumerate(_snake_case ):
lowerCAmelCase : Union[str, Any] = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(_snake_case ):05d}.bin''' )
lowerCAmelCase : Any = os.path.join(_snake_case , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) )
for key in shard:
lowerCAmelCase : List[Any] = shard_file
# Add the metadata
lowerCAmelCase : Dict = {'''total_size''': total_size}
lowerCAmelCase : int = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCAmelCase : Union[str, Any] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + '''\n'''
f.write(_snake_case )
return metadata, index
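
# Self-contained sketch of the shard-file naming used above: with
# WEIGHTS_NAME == "pytorch_model.bin", shard 1 of 4 is renamed as follows.
_shard_name = WEIGHTS_NAME.replace(".bin", f"-{1:05d}-of-{4:05d}.bin")
assert _shard_name == "pytorch_model-00001-of-00004.bin"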
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
snake_case__ : List[str] = parser.parse_args()
snake_case__ , snake_case__ : Tuple = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
snake_case__ : str = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
snake_case__ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 314 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
_snake_case = namedtuple('covid_data', 'cases deaths recovered')
def _A ( snake_case = "https://www.worldometers.info/coronavirus/" ) -> str:
_lowercase : int = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(snake_case ).content ).xpath(snake_case ) )
_snake_case = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
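
# Minimal sketch of the namedtuple above, rebuilt locally so the example is
# self-contained (the figures are placeholders, not real data):
_demo = namedtuple("covid_data", "cases deaths recovered")("1,000", "10", "900")
assert _demo.deaths == "10"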
| 250 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A: Tuple = logging.get_logger(__name__)
A: List[Any] = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : int = 'blenderbot-small'
__lowerCAmelCase : Any = ['past_key_values']
__lowerCAmelCase : str = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _SCREAMING_SNAKE_CASE=50265 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : str = d_model
UpperCAmelCase : List[Any] = encoder_ffn_dim
UpperCAmelCase : Union[str, Any] = encoder_layers
UpperCAmelCase : List[str] = encoder_attention_heads
UpperCAmelCase : Optional[Any] = decoder_ffn_dim
UpperCAmelCase : List[Any] = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = dropout
UpperCAmelCase : Union[str, Any] = attention_dropout
UpperCAmelCase : int = activation_dropout
UpperCAmelCase : int = activation_function
UpperCAmelCase : Optional[Any] = init_std
UpperCAmelCase : List[str] = encoder_layerdrop
UpperCAmelCase : List[str] = decoder_layerdrop
UpperCAmelCase : List[str] = use_cache
UpperCAmelCase : Union[str, Any] = encoder_layers
UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , forced_eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase : int = {0: """batch"""}
UpperCAmelCase : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
UpperCAmelCase : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase , UpperCAmelCase : Tuple = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCAmelCase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Union[str, Any] = super().outputs
else:
UpperCAmelCase : int = super(_SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
UpperCAmelCase , UpperCAmelCase : Any = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Generate decoder inputs
UpperCAmelCase : Optional[Any] = seq_length if not self.use_past else 1
UpperCAmelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase : Union[str, Any] = dict(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase , UpperCAmelCase : int = common_inputs["""input_ids"""].shape
UpperCAmelCase : Any = common_inputs["""decoder_input_ids"""].shape[1]
UpperCAmelCase , UpperCAmelCase : str = self.num_attention_heads
UpperCAmelCase : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase : Union[str, Any] = decoder_seq_length + 3
UpperCAmelCase : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase : Optional[Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCAmelCase : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase , UpperCAmelCase : List[Any] = self.num_layers
UpperCAmelCase : Optional[Any] = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) - min_num_layers
UpperCAmelCase : Optional[int] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
UpperCAmelCase : Dict = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase , UpperCAmelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCAmelCase : Union[str, Any] = seqlen + 2
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.num_layers
UpperCAmelCase , UpperCAmelCase : Dict = self.num_attention_heads
UpperCAmelCase : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase : List[Any] = common_inputs["""attention_mask"""].dtype
UpperCAmelCase : Any = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCAmelCase : List[str] = [
(torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(_SCREAMING_SNAKE_CASE )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If the axis is dynamic (-1), we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : List[str] = tokenizer.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_SCREAMING_SNAKE_CASE )
        # Generate dummy inputs according to the computed batch and sequence sizes
UpperCAmelCase : Dict = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase : Optional[int] = dict(tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : List[Any] = super()._flatten_past_key_values_(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Dict = super(_SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
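
# Self-contained sketch of the cached key/value shape assembled above,
# (batch, num_heads, sequence_length, hidden_size // num_heads); the numbers
# are illustrative:
_batch, _heads, _seq_len, _hidden = 2, 16, 7, 512
_past_shape = (_batch, _heads, _seq_len, _hidden // _heads)
assert _past_shape == (2, 16, 7, 32)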
| 109 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __a ( __UpperCamelCase ):
__snake_case : Optional[int] = ["""image_processor"""]
__snake_case : Union[str, Any] = """SamImageProcessor"""
def __init__( self : Tuple , UpperCAmelCase : List[Any] ):
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : Any = self.image_processor
lowerCAmelCase_ : Tuple = -10
lowerCAmelCase_ : Tuple = self.image_processor.size["""longest_edge"""]
def __call__( self : Optional[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : int=None , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Union[str, Any] , ):
lowerCAmelCase_ : List[Any] = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
        # pop arguments that are not used in the forward pass but are needed nevertheless
lowerCAmelCase_ : Tuple = encoding_image_processor["""original_sizes"""]
if hasattr(UpperCAmelCase , """numpy""" ): # Checks if Torch or TF tensor
lowerCAmelCase_ : Union[str, Any] = original_sizes.numpy()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self._check_and_preprocess_points(
input_points=UpperCAmelCase , input_labels=UpperCAmelCase , input_boxes=UpperCAmelCase , )
lowerCAmelCase_ : Optional[int] = self._normalize_and_convert(
UpperCAmelCase , UpperCAmelCase , input_points=UpperCAmelCase , input_labels=UpperCAmelCase , input_boxes=UpperCAmelCase , return_tensors=UpperCAmelCase , )
return encoding_image_processor
def A ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : List[str]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple="pt" , ):
if input_points is not None:
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
lowerCAmelCase_ : List[Any] = [
self._normalize_coordinates(self.target_size , UpperCAmelCase , original_sizes[0] ) for point in input_points
]
else:
lowerCAmelCase_ : Tuple = [
self._normalize_coordinates(self.target_size , UpperCAmelCase , UpperCAmelCase )
for point, original_size in zip(UpperCAmelCase , UpperCAmelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self._pad_points_and_labels(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = np.array(UpperCAmelCase )
if input_labels is not None:
lowerCAmelCase_ : Tuple = np.array(UpperCAmelCase )
if input_boxes is not None:
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
lowerCAmelCase_ : int = [
self._normalize_coordinates(self.target_size , UpperCAmelCase , original_sizes[0] , is_bounding_box=UpperCAmelCase )
for box in input_boxes
]
else:
lowerCAmelCase_ : Tuple = [
self._normalize_coordinates(self.target_size , UpperCAmelCase , UpperCAmelCase , is_bounding_box=UpperCAmelCase )
for box, original_size in zip(UpperCAmelCase , UpperCAmelCase )
]
lowerCAmelCase_ : List[Any] = np.array(UpperCAmelCase )
if input_boxes is not None:
if return_tensors == "pt":
lowerCAmelCase_ : Optional[int] = torch.from_numpy(UpperCAmelCase )
# boxes batch size of 1 by default
lowerCAmelCase_ : List[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowerCAmelCase_ : str = tf.convert_to_tensor(UpperCAmelCase )
# boxes batch size of 1 by default
lowerCAmelCase_ : Tuple = tf.expand_dims(UpperCAmelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowerCAmelCase_ : Union[str, Any] = torch.from_numpy(UpperCAmelCase )
# point batch size of 1 by default
lowerCAmelCase_ : List[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowerCAmelCase_ : str = tf.convert_to_tensor(UpperCAmelCase )
# point batch size of 1 by default
lowerCAmelCase_ : Any = tf.expand_dims(UpperCAmelCase , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowerCAmelCase_ : Dict = torch.from_numpy(UpperCAmelCase )
# point batch size of 1 by default
lowerCAmelCase_ : List[Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowerCAmelCase_ : int = tf.convert_to_tensor(UpperCAmelCase )
# point batch size of 1 by default
lowerCAmelCase_ : Dict = tf.expand_dims(UpperCAmelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int ):
lowerCAmelCase_ : List[Any] = max([point.shape[0] for point in input_points] )
lowerCAmelCase_ : List[str] = []
for i, point in enumerate(UpperCAmelCase ):
if point.shape[0] != expected_nb_points:
lowerCAmelCase_ : Tuple = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowerCAmelCase_ : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(UpperCAmelCase )
lowerCAmelCase_ : int = processed_input_points
return input_points, input_labels
def A ( self : str , UpperCAmelCase : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any]=False ):
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = original_size
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.image_processor._get_preprocess_shape(UpperCAmelCase , longest_edge=UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = deepcopy(UpperCAmelCase ).astype(UpperCAmelCase )
if is_bounding_box:
lowerCAmelCase_ : Optional[int] = coords.reshape(-1 , 2 , 2 )
lowerCAmelCase_ : List[str] = coords[..., 0] * (new_w / old_w)
lowerCAmelCase_ : str = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCAmelCase_ : List[str] = coords.reshape(-1 , 4 )
return coords
def A ( self : Any , UpperCAmelCase : int=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[int]=None , ):
if input_points is not None:
if hasattr(UpperCAmelCase , """numpy""" ): # Checks for TF or Torch tensor
lowerCAmelCase_ : Optional[int] = input_points.numpy().tolist()
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not isinstance(input_points[0] , UpperCAmelCase ):
raise ValueError("""Input points must be a list of list of floating points.""" )
lowerCAmelCase_ : List[str] = [np.array(UpperCAmelCase ) for input_point in input_points]
else:
lowerCAmelCase_ : Optional[int] = None
if input_labels is not None:
if hasattr(UpperCAmelCase , """numpy""" ):
lowerCAmelCase_ : List[str] = input_labels.numpy().tolist()
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not isinstance(input_labels[0] , UpperCAmelCase ):
raise ValueError("""Input labels must be a list of list integers.""" )
lowerCAmelCase_ : Optional[Any] = [np.array(UpperCAmelCase ) for label in input_labels]
else:
lowerCAmelCase_ : List[str] = None
if input_boxes is not None:
if hasattr(UpperCAmelCase , """numpy""" ):
lowerCAmelCase_ : str = input_boxes.numpy().tolist()
if (
not isinstance(UpperCAmelCase , UpperCAmelCase )
or not isinstance(input_boxes[0] , UpperCAmelCase )
or not isinstance(input_boxes[0][0] , UpperCAmelCase )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
lowerCAmelCase_ : int = [np.array(UpperCAmelCase ).astype(np.floataa ) for box in input_boxes]
else:
lowerCAmelCase_ : Union[str, Any] = None
return input_points, input_labels, input_boxes
@property
def A ( self : Tuple ):
lowerCAmelCase_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(UpperCAmelCase ) )
def A ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : List[str] ):
return self.image_processor.post_process_masks(*UpperCAmelCase , **UpperCAmelCase )
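
# Self-contained sketch of the rescaling done in `_normalize_coordinates`
# above: x is scaled by new_w / old_w and y by new_h / old_h (all sizes here
# are illustrative):
_old_h, _old_w, _new_h, _new_w = 600, 800, 768, 1024
_point = np.array([[400.0, 300.0]])  # (x, y) in the original image
_scaled = _point * np.array([_new_w / _old_w, _new_h / _old_h])
assert np.allclose(_scaled, [[512.0, 384.0]])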
| 28 |
from math import factorial, pi
def __UpperCamelCase ( lowercase__ : float , lowercase__ : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(lowercase__ , (int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(lowercase__ , lowercase__ ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
lowerCAmelCase_ : Optional[int] = float(lowercase__ )
lowerCAmelCase_ : Union[str, Any] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(lowercase__ ) )
def __UpperCamelCase ( lowercase__ : float , lowercase__ : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(lowercase__ , (int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(lowercase__ , lowercase__ ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
lowerCAmelCase_ : int = float(lowercase__ )
lowerCAmelCase_ : Optional[int] = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
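
# Self-contained sanity sketch of the truncated Maclaurin series for sine:
# sum_{r<N} (-1)^r * x^(2r+1) / (2r+1)! should agree with math.sin(x).
from math import sin

_x = 1.0
_approx = sum((-1) ** r * _x ** (2 * r + 1) / factorial(2 * r + 1) for r in range(30))
assert abs(_approx - sin(_x)) < 1e-9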
| 28 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A : Union[str, Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
A : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
A : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
A : int = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
    # used during training (even though we don't have a training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, \"{attribute}\"''' in modeling_source
or f'''getattr(self.config, \"{attribute}\"''' in modeling_source
):
lowercase__ = True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"''' , _UpperCamelCase , )
is not None
):
lowercase__ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowercase__ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowercase__ = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
lowercase__ = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
lowercase__ = True
if not attribute_used:
lowercase__ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowercase__ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowercase__ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowercase__ = True
elif attribute.endswith("""_token_id""" ):
lowercase__ = True
# configuration class specific cases
if not case_allowed:
lowercase__ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowercase__ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
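
# Self-contained sketch of the multi-line `getattr` detection above; the
# attribute name `rope_theta` is purely illustrative:
_src = 'x = getattr(\n    self.config,\n    "rope_theta",\n    10000,\n)'
_pat = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"rope_theta"'
assert re.search(_pat, _src) is not None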
def UpperCamelCase ( __magic_name__ : str ) -> int:
"""simple docstring"""
lowercase__ = dict(inspect.signature(config_class.__init__ ).parameters )
lowercase__ = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
lowercase__ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowercase__ = {}
if len(config_class.attribute_map ) > 0:
lowercase__ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowercase__ = inspect.getsourcefile(_UpperCamelCase )
lowercase__ = os.path.dirname(_UpperCamelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowercase__ = [os.path.join(_UpperCamelCase , _UpperCamelCase ) for fn in os.listdir(_UpperCamelCase ) if fn.startswith("""modeling_""" )]
# Get the source code strings
lowercase__ = []
for path in modeling_paths:
if os.path.isfile(_UpperCamelCase ):
with open(_UpperCamelCase ) as fp:
modeling_sources.append(fp.read() )
lowercase__ = []
for config_param, default_value in zip(_UpperCamelCase , _UpperCamelCase ):
# `attributes` here is all the variant names for `config_param`
lowercase__ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
unused_attributes.append(attributes[0] )
return sorted(_UpperCamelCase )
def UpperCamelCase ( ) -> Dict:
"""simple docstring"""
lowercase__ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowercase__ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __magic_name__ : inspect.isclass(_UpperCamelCase )
and issubclass(_UpperCamelCase , _UpperCamelCase )
and inspect.getmodule(_UpperCamelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowercase__ = check_config_attributes_being_used(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
lowercase__ = unused_attributes
if len(_UpperCamelCase ) > 0:
lowercase__ = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(_UpperCamelCase )
if __name__ == "__main__":
check_config_attributes()
| 305 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ):
'''simple docstring'''
__lowerCamelCase = p_stop
__lowerCamelCase = max_length
def __iter__( self ):
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = False
while not stop and count < self.max_length:
yield count
count += 1
__lowerCamelCase = random.random() < self.p_stop
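
# Self-contained sketch of the round-robin sharding idea the tests below
# exercise: shard i of n keeps every n-th batch, starting at offset i.
def _round_robin_shards(batches, num_shards):
    return [batches[i::num_shards] for i in range(num_shards)]

assert _round_robin_shards([[0, 1], [2, 3], [4, 5], [6, 7]], 2) == [
    [[0, 1], [4, 5]],
    [[2, 3], [6, 7]],
]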
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ):
'''simple docstring'''
__lowerCamelCase = [
BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
for i in range(2 )
]
__lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
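    # With even_batches=False, no samples are re-used to pad the shards, so the
    # shards may yield different numbers of batches (or a shorter final batch),
    # as the expectations above show.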
def lowerCamelCase ( self ):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
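    # BatchSamplerShard also accepts an underlying sampler whose batches have
    # uneven sizes: with even_batches=False the batches are simply dealt out
    # round-robin, one per process, without reshaping them.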
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ):
'''simple docstring'''
random.seed(__UpperCAmelCase )
__lowerCamelCase = list(__UpperCAmelCase )
__lowerCamelCase = [
IterableDatasetShard(
__UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , )
for i in range(__UpperCAmelCase )
]
__lowerCamelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__UpperCAmelCase )
iterable_dataset_lists.append(list(__UpperCAmelCase ) )
__lowerCamelCase = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
__lowerCamelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 )
__lowerCamelCase = []
for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__UpperCAmelCase ) < len(__UpperCAmelCase ):
reference += reference
self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] )
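    # The check above rebuilds a reference by interleaving shard_batch_size-sized
    # chunks from each shard; when drop_last=False the reference is cycled to
    # account for the samples duplicated to even out the final batch.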
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 42
__lowerCamelCase = RandomIterableDataset()
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase )
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase )
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase )
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase )
# Edge case with a very small dataset
__lowerCamelCase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase )
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase )
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase )
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 )
self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
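    # SkipBatchSampler(sampler, n) drops the first n batches of the wrapped
    # sampler, which is the building block used to resume training mid-epoch.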
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 )
__lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowerCamelCase ( self ):
'''simple docstring'''
Accelerator()
__lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 330 | 0 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
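    # A hedged usage sketch for the corrected binary_and above (an illustration,
    # not part of the original module): 25 = 0b011001 and 32 = 0b100000 share no
    # set bits, while 37 = 0b100101 and 50 = 0b110010 share only the top bit.
    assert binary_and(25, 32) == "0b000000"
    assert binary_and(37, 50) == "0b100000"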
| 369 |
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Compute the rank of a matrix in place via Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if the diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below to swap rows with
            reduce_rank = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce_rank = False
                    break
            if reduce_rank:
                # The column is all zeros from here down: fold in the last
                # column and shrink the rank
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row and re-examine it on the next pass
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
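    # A minimal sanity check for the corrected rank_of_matrix above (an
    # illustration, not part of the original module): the second row is twice
    # the first, so only two rows are linearly independent.
    assert rank_of_matrix([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [0.0, 1.0, 1.0]]) == 2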
| 165 | 0 |
# flake8: noqa
# Lint as: python3
UpperCAmelCase__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 339 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if the given non-negative integer is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
        with self.assertRaises(AssertionError):
is_prime(-19)
self.assertFalse(
is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 339 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 366 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def snake_case_ ( A_ : str, A_ : str ):
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
_lowerCamelCase : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(A_ )
_lowerCamelCase , _lowerCamelCase : List[str] = XLMProphetNetForConditionalGeneration.from_pretrained(
A_, output_loading_info=A_ )
else:
_lowerCamelCase : str = ProphetNetForConditionalGenerationOld.from_pretrained(A_ )
_lowerCamelCase , _lowerCamelCase : Any = ProphetNetForConditionalGeneration.from_pretrained(
A_, output_loading_info=A_ )
_lowerCamelCase : Optional[Any] = ['''key_proj''', '''value_proj''', '''query_proj''']
_lowerCamelCase : List[Any] = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
_lowerCamelCase : Union[str, Any] = key.split('''.''' )
if attributes[0] == "lm_head":
_lowerCamelCase : str = prophet
_lowerCamelCase : List[Any] = prophet_old
else:
_lowerCamelCase : Optional[int] = prophet.prophetnet
_lowerCamelCase : Optional[Any] = prophet_old.model
_lowerCamelCase : Any = False
for attribute in attributes:
if attribute in mapping:
_lowerCamelCase : Optional[int] = mapping[attribute]
if not hasattr(A_, A_ ) and len(A_ ) > 0:
_lowerCamelCase : int = attribute
elif hasattr(A_, A_ ):
_lowerCamelCase : int = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_lowerCamelCase : Optional[int] = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_lowerCamelCase : List[str] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_lowerCamelCase : int = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_lowerCamelCase : Union[str, Any] = True
break
elif attribute in special_keys and hasattr(A_, '''in_proj_weight''' ):
_lowerCamelCase : Tuple = old_model.in_proj_weight.shape[0] // 3
_lowerCamelCase : List[Any] = getattr(A_, A_ )
                assert (
                    param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape
                ), "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_lowerCamelCase : Optional[Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_lowerCamelCase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_lowerCamelCase : int = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_lowerCamelCase : str = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_lowerCamelCase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_lowerCamelCase : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_lowerCamelCase : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                _lowerCamelCase : List[str] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_lowerCamelCase : Optional[Any] = True
break
if attribute.isdigit():
_lowerCamelCase : Optional[int] = model[int(A_ )]
_lowerCamelCase : List[Any] = old_model[int(A_ )]
else:
_lowerCamelCase : List[str] = getattr(A_, A_ )
if old_attribute == "":
_lowerCamelCase : str = old_model
else:
if not hasattr(A_, A_ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_lowerCamelCase : Optional[int] = getattr(A_, A_ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
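    # Example invocation (the script name and both paths are hypothetical,
    # shown only for illustration):
    # python convert_prophetnet_checkpoint_to_pytorch.py \
    #     --prophetnet_checkpoint_path ./prophetnet_old \
    #     --pytorch_dump_folder_path ./prophetnet_converted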
| 175 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 74 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''dpt'''
def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]:
super().__init__(**A_ )
A = hidden_size
A = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
A = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
logger.info('Initializing the config with a `BiT` backbone.' )
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
A = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
A = backbone_featmap_shape
A = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
A = None
A = None
A = []
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
A = readout_type
A = reassemble_factors
A = neck_hidden_sizes
A = fusion_hidden_size
A = head_in_index
A = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
A = use_auxiliary_head
A = auxiliary_loss_weight
A = semantic_loss_ignore_index
A = semantic_classifier_dropout
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A = self.backbone_config.to_dict()
A = self.__class__.model_type
return output
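# A minimal usage sketch (assuming this config ships as DPTConfig, which the
# model_type "dpt" suggests, and that the serialization method above is the
# standard PretrainedConfig-style to_dict):
#
#     config = DPTConfig(is_hybrid=False, readout_type="project")
#     config_dict = config.to_dict()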
| 74 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_UpperCamelCase = DDIMScheduler()
torch.manual_seed(0)
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0)
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCamelCase = CLIPTextModel(lowercase_)
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[Any]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = torch.manual_seed(lowercase_)
_UpperCamelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = sd_pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2])
def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3)
def __UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = "french fries"
_UpperCamelCase = sd_pipe(**lowercase_ , negative_prompt=lowercase_)
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = sd_pipe(**lowercase_ , view_batch_size=2)
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear")
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = sd_pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , skip_prk_steps=lowercase_)
_UpperCamelCase = StableDiffusionPanoramaPipeline(**lowercase_)
_UpperCamelCase = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = self.get_dummy_inputs(lowercase_)
_UpperCamelCase = sd_pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any=0) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = torch.manual_seed(lowercase_)
_UpperCamelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = "stabilityai/stable-diffusion-2-base"
_UpperCamelCase = DDIMScheduler.from_pretrained(lowercase_ , subfolder="scheduler")
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCamelCase = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
])
assert np.abs(expected_slice - image_slice).max() < 1e-2
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=lowercase_)
_UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**lowercase_).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
        _UpperCamelCase = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def __UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCamelCase = 0
def callback_fn(lowercase_ : int , lowercase_ : int , lowercase_ : torch.FloatTensor) -> None:
_UpperCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
_UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
_UpperCamelCase = False
_UpperCamelCase = "stabilityai/stable-diffusion-2-base"
_UpperCamelCase = DDIMScheduler.from_pretrained(lowercase_ , subfolder="scheduler")
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
pipe(**lowercase_ , callback=lowercase_ , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def __UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = "stabilityai/stable-diffusion-2-base"
_UpperCamelCase = DDIMScheduler.from_pretrained(lowercase_ , subfolder="scheduler")
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**lowercase_)
_UpperCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 63 |
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
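# Each placeholder class below stands in for a torch-backed class (the real
# module gives them distinct names); requires_backends raises an ImportError
# telling the user to install `torch`, and guarding __init__ plus the two
# classmethods (from_config / from_pretrained in the real file) makes any
# access fail fast.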
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : str) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : int) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : str , **lowercase_ : Optional[int]) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : List[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Any , *lowercase_ : List[str] , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Optional[Any] , **lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Dict , **lowercase_ : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Optional[int] , **lowercase_ : str) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Dict) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : str , **lowercase_ : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : Tuple , **lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : str , **lowercase_ : int) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : List[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : Optional[int] , **lowercase_ : Any) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : int , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Optional[int] , **lowercase_ : Dict) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Any) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Any , **lowercase_ : Dict) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : List[str] , **lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : Dict , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : str , **lowercase_ : Union[str, Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
def lowerCAmelCase__ ( *a__ , **a__ ) ->Optional[Any]:
'''simple docstring'''
requires_backends(a__ , ["torch"] )
def lowerCAmelCase__ ( *a__ , **a__ ) ->Any:
'''simple docstring'''
requires_backends(a__ , ["torch"] )
def lowerCAmelCase__ ( *a__ , **a__ ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(a__ , ["torch"] )
def lowerCAmelCase__ ( *a__ , **a__ ) ->List[Any]:
'''simple docstring'''
requires_backends(a__ , ["torch"] )
def lowerCAmelCase__ ( *a__ , **a__ ) ->List[Any]:
'''simple docstring'''
requires_backends(a__ , ["torch"] )
def lowerCAmelCase__ ( *a__ , **a__ ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(a__ , ["torch"] )
def lowerCAmelCase__ ( *a__ , **a__ ) ->List[str]:
'''simple docstring'''
requires_backends(a__ , ["torch"] )
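# The module-level stand-ins above mirror helper functions rather than classes;
# calling any of them raises the same ImportError asking for `torch`.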
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : List[Any]) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : List[str] , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : List[str] , **lowercase_ : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : Tuple , **lowercase_ : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : int) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Any) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Dict , **lowercase_ : Dict) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Union[str, Any] , **lowercase_ : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : int , **lowercase_ : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Dict) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : Optional[int]) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : Dict) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : Tuple , **lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : str , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : List[Any] , **lowercase_ : int) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : int , **lowercase_ : int) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : str , **lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : int , **lowercase_ : List[str]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Any) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any]) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : int , **lowercase_ : Optional[int]) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Any) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : List[str] , **lowercase_ : Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Optional[Any] , **lowercase_ : List[str]) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : str , **lowercase_ : int) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : int , **lowercase_ : List[str]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : List[str]) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Optional[int] , **lowercase_ : Tuple) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : int , *lowercase_ : Dict , **lowercase_ : Dict) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : str , **lowercase_ : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : List[str]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Any , **lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : str) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : str , **lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : List[str] , **lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : List[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : Tuple , **lowercase_ : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : List[Any]) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Dict , **lowercase_ : List[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Dict , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Any , **lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Dict) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : str , **lowercase_ : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : List[Any] , **lowercase_ : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : str , **lowercase_ : Union[str, Any]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : int , **lowercase_ : Optional[int]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : str , **lowercase_ : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Optional[int] , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : Union[str, Any] , **lowercase_ : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : Dict , **lowercase_ : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : List[str]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any]) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Dict , **lowercase_ : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : str , **lowercase_ : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : str) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : str , **lowercase_ : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Any , *lowercase_ : Tuple , **lowercase_ : str) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : str , **lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Any) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Any , **lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Union[str, Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : str , **lowercase_ : Any) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : Union[str, Any] , **lowercase_ : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Any , **lowercase_ : List[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : str , **lowercase_ : int) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : List[Any] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : Dict , **lowercase_ : Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Dict , **lowercase_ : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : List[str] , **lowercase_ : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Tuple , **lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
| 63 | 1 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url( repo_id , path , revision ):
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}"""
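# Illustrative value (mirrors the assertion above; assumes the default Hub endpoint):
# hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
# == "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"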
| 165 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
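# The attention lookup below gathers the key/out/query/value kernels for block
# i and flattens the (num_heads, head_dim) axes into a single hidden dimension.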
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
SCREAMING_SNAKE_CASE__ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
SCREAMING_SNAKE_CASE__ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
SCREAMING_SNAKE_CASE__ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
SCREAMING_SNAKE_CASE__ = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
SCREAMING_SNAKE_CASE__ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
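# The MLP lookup below returns (wi, wo); v1.1-style checkpoints use a gated
# activation with two input projections (wi_0, wi_1), in which case wi is a pair.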
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
SCREAMING_SNAKE_CASE__ = (wi_a, wi_a)
else:
SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
SCREAMING_SNAKE_CASE__ = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def A ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(variables["""target"""] )
SCREAMING_SNAKE_CASE__ = {"""/""".join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
SCREAMING_SNAKE_CASE__ = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , snake_case__ )
SCREAMING_SNAKE_CASE__ = collections.OrderedDict()
# Shared embeddings.
SCREAMING_SNAKE_CASE__ = old["""token_embedder/embedding"""]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" )
SCREAMING_SNAKE_CASE__ = layer_norm
SCREAMING_SNAKE_CASE__ = k.T
SCREAMING_SNAKE_CASE__ = o.T
SCREAMING_SNAKE_CASE__ = q.T
SCREAMING_SNAKE_CASE__ = v.T
# Block i, layer 1 (MLP).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ )
SCREAMING_SNAKE_CASE__ = layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = wi[0].T
SCREAMING_SNAKE_CASE__ = wi[1].T
else:
SCREAMING_SNAKE_CASE__ = wi.T
SCREAMING_SNAKE_CASE__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(
snake_case__ , snake_case__ , """encoder""" ).T
SCREAMING_SNAKE_CASE__ = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(
snake_case__ , 0 , """encoder""" ).T
SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(
snake_case__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" )
SCREAMING_SNAKE_CASE__ = layer_norm
SCREAMING_SNAKE_CASE__ = k.T
SCREAMING_SNAKE_CASE__ = o.T
SCREAMING_SNAKE_CASE__ = q.T
SCREAMING_SNAKE_CASE__ = v.T
# Block i, layer 1 (Cross Attention).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" )
SCREAMING_SNAKE_CASE__ = layer_norm
SCREAMING_SNAKE_CASE__ = k.T
SCREAMING_SNAKE_CASE__ = o.T
SCREAMING_SNAKE_CASE__ = q.T
SCREAMING_SNAKE_CASE__ = v.T
# Block i, layer 2 (MLP).
SCREAMING_SNAKE_CASE__ = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ )
SCREAMING_SNAKE_CASE__ = layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = wi[0].T
SCREAMING_SNAKE_CASE__ = wi[1].T
else:
SCREAMING_SNAKE_CASE__ = wi.T
SCREAMING_SNAKE_CASE__ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
SCREAMING_SNAKE_CASE__ = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T
SCREAMING_SNAKE_CASE__ = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
SCREAMING_SNAKE_CASE__ = old["""decoder/logits_dense/kernel"""].T
return new
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
SCREAMING_SNAKE_CASE__ = state_dict["""shared.weight"""]
return state_dict
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = checkpoints.load_tax_checkpoint(snake_case__ )
SCREAMING_SNAKE_CASE__ = convert_tax_to_pytorch(
snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ )
SCREAMING_SNAKE_CASE__ = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ = MT5Config.from_json_file(snake_case__ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
        SCREAMING_SNAKE_CASE__ = UMT5EncoderModel(snake_case__ )
else:
        SCREAMING_SNAKE_CASE__ = UMT5ForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("""Done""" )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
A_ : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
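# Example invocation (illustrative script name and paths, not part of the file):
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x_checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model \
#     --scalable_attention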
| 165 | 1 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__A : Dict = 'sshleifer/mar_enro_6_3_student'
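# The tests below drive the example finetune/distillation scripts end to end on
# a single GPU and check that validation BLEU improves and that a loadable
# checkpoint plus test generations are produced.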
class __UpperCamelCase ( TestCasePlus ):
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
super().setUp()
A = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=__SCREAMING_SNAKE_CASE , )
A = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
MarianMTModel.from_pretrained(__SCREAMING_SNAKE_CASE)
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ (self : Tuple):
A = {
"$MAX_LEN": 6_4,
"$BS": 6_4,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
A = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
A = bash_script.replace("\\\n" , "").strip().replace("\"$@\"" , "")
for k, v in env_vars_to_replace.items():
A = bash_script.replace(__SCREAMING_SNAKE_CASE , str(__SCREAMING_SNAKE_CASE))
A = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
A = F"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
A = ["finetune.py"] + bash_script.split() + args
with patch.object(__SCREAMING_SNAKE_CASE , "argv" , __SCREAMING_SNAKE_CASE):
A = argparse.ArgumentParser()
A = pl.Trainer.add_argparse_args(__SCREAMING_SNAKE_CASE)
A = SummarizationModule.add_model_specific_args(__SCREAMING_SNAKE_CASE , os.getcwd())
A = parser.parse_args()
A = main(__SCREAMING_SNAKE_CASE)
# Check metrics
A = load_json(model.metrics_save_path)
A = metrics["val"][0]
A = metrics["val"][-1]
self.assertEqual(len(metrics["val"]) , (args.max_epochs / args.val_check_interval))
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __SCREAMING_SNAKE_CASE)
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.0_1)
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0)
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2)
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 1_7)
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]) , 1.1)
# check lightning ckpt can be loaded and has a reasonable statedict
A = os.listdir(__SCREAMING_SNAKE_CASE)
A = [x for x in contents if x.endswith(".ckpt")][0]
A = os.path.join(args.output_dir , __SCREAMING_SNAKE_CASE)
A = torch.load(__SCREAMING_SNAKE_CASE , map_location="cpu")
A = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"]) == 1
class __UpperCamelCase ( TestCasePlus ):
@timeout_decorator.timeout(6_0_0)
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ (self : int):
A = F"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
A = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 1_2_8,
"$BS": 1_6,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
A = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
)
A = bash_script.replace("\\\n" , "").strip().replace("\"$@\"" , "")
A = bash_script.replace("--fp16 " , " ")
for k, v in env_vars_to_replace.items():
A = bash_script.replace(__SCREAMING_SNAKE_CASE , str(__SCREAMING_SNAKE_CASE))
A = self.get_auto_remove_tmp_dir()
A = bash_script.replace("--fp16" , "")
A = 6
A = (
["distillation.py"]
+ bash_script.split()
+ [
F"""--output_dir={output_dir}""",
"--gpus=1",
"--learning_rate=1e-3",
F"""--num_train_epochs={epochs}""",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(__SCREAMING_SNAKE_CASE , "argv" , __SCREAMING_SNAKE_CASE):
A = argparse.ArgumentParser()
A = pl.Trainer.add_argparse_args(__SCREAMING_SNAKE_CASE)
A = SummarizationDistiller.add_model_specific_args(__SCREAMING_SNAKE_CASE , os.getcwd())
A = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
A = distill_main(__SCREAMING_SNAKE_CASE)
# Check metrics
A = load_json(model.metrics_save_path)
A = metrics["val"][0]
A = metrics["val"][-1]
assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __SCREAMING_SNAKE_CASE)
# check lightning ckpt can be loaded and has a reasonable statedict
A = os.listdir(__SCREAMING_SNAKE_CASE)
A = [x for x in contents if x.endswith(".ckpt")][0]
A = os.path.join(args.output_dir , __SCREAMING_SNAKE_CASE)
A = torch.load(__SCREAMING_SNAKE_CASE , map_location="cpu")
A = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"]) == 1
| 57 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def __SCREAMING_SNAKE_CASE ( lowercase__=None ):
"""simple docstring"""
if subparsers is not None:
A = subparsers.add_parser("env" )
else:
A = argparse.ArgumentParser("Accelerate env command" )
parser.add_argument(
"--config_file" , default=lowercase__ , help="The config file to use for the default values in the launching script." )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = torch.__version__
A = torch.cuda.is_available()
A = is_xpu_available()
A = is_npu_available()
A = "Not found"
# Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
A = load_config_from_file(args.config_file ).to_dict()
A = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"PyTorch XPU available": str(lowercase__ ),
"PyTorch NPU available": str(lowercase__ ),
"System RAM": F"""{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB""",
}
if pt_cuda_available:
A = torch.cuda.get_device_name()
print("\nCopy-and-paste the text below in your GitHub issue\n" )
print("\n".join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
A = (
"\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(lowercase__ , lowercase__ )
else F"""\t{accelerate_config}"""
)
print(lowercase__ )
A = accelerate_config
return info
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = env_command_parser()
A = parser.parse_args()
env_command(lowercase__ )
return 0
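# Typical invocation (illustrative): `accelerate env`, optionally with
# `--config_file path/to/config.yaml`; running this module directly is equivalent.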
if __name__ == "__main__":
raise SystemExit(main())
| 57 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
    def __init__( self , img , dst_width : int , dst_height : int ):
        '''simple docstring'''
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''' )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
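    # Each destination pixel (i, j) is filled from the nearest source pixel,
    # found by scaling the destination coordinates by the src/dst size ratios.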
    def process( self ):
        '''simple docstring'''
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self , x : int ):
        '''simple docstring'''
        return int(self.ratio_x * x )
    def get_y( self , y : int ):
        '''simple docstring'''
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w , dst_h = 8_00, 6_00
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 34 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree :
    def __init__( self , size ) -> None:
        """simple docstring"""
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )] # flag for lazy update
    def left( self , idx ) -> int:
        """simple docstring"""
        return idx * 2
    def right( self , idx ) -> int:
        """simple docstring"""
        return idx * 2 + 1
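    # build() recursively fills the tree so that node idx holds the maximum of
    # its (1-indexed, inclusive) range [left_element, right_element].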
    def build( self , idx , left_element , right_element , a ) -> None:
        """simple docstring"""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx , left_element , right_element , a , b , val ) -> bool:
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
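    # query() returns the maximum over [a, b], pushing any pending lazy range
    # assignment one level down before descending.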
    def query( self , idx , left_element , right_element , a , b ) -> int | float:
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
    def __str__( self ) -> str:
        """simple docstring"""
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
    size = 1_5
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
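    # Expected output, worked out by hand for the demo above: 7, 14, 15, 111,
    # then the per-element values
    # [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8]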
| 321 | 0 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester :
'''simple docstring'''
def __init__( self, A, A=13, A=7, A=True, A=True, A=False, A=True, A=99, A=64, A=5, A=4, A=64, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=3, A=4, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : str = seq_length
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : int = use_input_mask
SCREAMING_SNAKE_CASE : int = use_token_type_ids
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE : int = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Any = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size], self.num_choices )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
def UpperCamelCase_ ( self, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = MPNetModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_a, _a )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = MPNetForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
_a, attention_mask=_a, start_positions=_a, end_positions=_a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = MPNetForSequenceClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(_a, attention_mask=_a, labels=_a )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
SCREAMING_SNAKE_CASE : str = MPNetForMultipleChoice(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
SCREAMING_SNAKE_CASE : int = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
SCREAMING_SNAKE_CASE : Tuple = model(
_a, attention_mask=_a, labels=_a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = MPNetForTokenClassification(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(_a, attention_mask=_a, labels=_a )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
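# The common-test mixins below exercise every MPNet head class with the
# synthetic config and inputs produced by the model tester above.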
@require_torch
class _a ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A : Dict = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
A : Tuple = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
A : Tuple = False
A : Any = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MPNetModelTester(self )
        SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self, config_class=MPNetConfig, hidden_size=37 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_a )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_a )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_a )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_a )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_a )
@require_torch
class _a ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MPNetModel.from_pretrained('microsoft/mpnet-base' )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE : Any = model(_a )[0]
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape, _a )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3], _a, atol=1E-4 ) )
| 362 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( BaseImageProcessor ):
'''simple docstring'''
A : int = ['''pixel_values''']
def __init__( self, A = True, A = None, A = PILImageResampling.BICUBIC, A = True, A = None, A = True, A = 1 / 255, A = True, A = IMAGENET_DEFAULT_MEAN, A = IMAGENET_DEFAULT_STD, **A, ):
'''simple docstring'''
super().__init__(**A )
SCREAMING_SNAKE_CASE : Any = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(A, default_to_square=A )
SCREAMING_SNAKE_CASE : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : int = get_size_dict(A, param_name='crop_size' )
SCREAMING_SNAKE_CASE : Any = do_resize
SCREAMING_SNAKE_CASE : Dict = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE : Dict = crop_size
SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE : Any = rescale_factor
SCREAMING_SNAKE_CASE : List[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self, A, A, A = PILImageResampling.BICUBIC, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = get_size_dict(A, default_to_square=A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : List[str] = int((256 / 224) * size['shortest_edge'] )
SCREAMING_SNAKE_CASE : int = get_resize_output_image_size(A, size=A, default_to_square=A )
SCREAMING_SNAKE_CASE : List[str] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
A, size=(size_dict['height'], size_dict['width']), resample=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(A, size=(size['height'], size['width']), data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A, ):
'''simple docstring'''
return rescale(A, scale=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A, A = None, **A, ):
'''simple docstring'''
return normalize(A, mean=A, std=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = ChannelDimension.FIRST, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[int] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Dict = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : int = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(A, default_to_square=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(A, param_name='crop_size' )
SCREAMING_SNAKE_CASE : List[Any] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[int] = [to_numpy_array(A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : List[str] = [self.resize(A, A, A ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : List[Any] = [self.center_crop(A, A ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : List[str] = [self.rescale(A, A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Optional[int] = [self.normalize(A, A, A ) for image in images]
SCREAMING_SNAKE_CASE : List[str] = [to_channel_dimension_format(A, A ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': images}
return BatchFeature(data=A, tensor_type=A )
| 246 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''ibert'''
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[Any]=3_05_22 , lowerCamelCase_ : Tuple=7_68 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : Optional[Any]=12 , lowerCamelCase_ : Optional[Any]=30_72 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : List[Any]=5_12 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Optional[Any]=0.02 , lowerCamelCase_ : List[Any]=1e-12 , lowerCamelCase_ : Union[str, Any]=1 , lowerCamelCase_ : Tuple=0 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Tuple="absolute" , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Optional[int]="none" , **lowerCamelCase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : int = quant_mode
SCREAMING_SNAKE_CASE : List[str] = force_dequant
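        # quant_mode toggles I-BERT's integer-only execution path; force_dequant
        # (default "none") lets selected non-linear ops fall back to floating
        # point, per the upstream I-BERT implementation.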
class UpperCamelCase__ ( OnnxConfig ):
"""simple docstring"""
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 323 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
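# The source checkpoints follow the mmsegmentation key naming scheme; the
# rename table below maps those keys onto the Transformers naming scheme.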
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
# fmt: on
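# The original checkpoints store the patch-merging ("unfold") weights with their four
# patch slices in a different order than the HF implementation; the helpers below permute
# rows/entries with the index pattern [0, 2, 1, 3] to convert between the two layouts.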
def correct_unfold_reduction_order( x ):
    """simple docstring"""
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x ):
    """simple docstring"""
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x ):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x ):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    """simple docstring"""
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , file_name=model_name )[
"""state_dict"""
]
for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
# fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 323 | 1 |
import random
from typing import Any
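# Note: this variant swaps two uniformly random positions on every pass (a random
# transposition shuffle) rather than the classic index-descending Fisher-Yates walk.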
def fisher_yates_shuffle (data: list ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig (PretrainedConfig ):
'''simple docstring'''
    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=6_5024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("""n_embed""" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def head_dim( self ):
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary( self ):
        return not self.alibi
| 141 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ = "cpu" , lowerCAmelCase__ = "openai/clip-vit-large-patch14"):
__SCREAMING_SNAKE_CASE = device
__SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
__SCREAMING_SNAKE_CASE = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
__SCREAMING_SNAKE_CASE = torchvision.transforms.Normalize(self.image_mean , self.image_std)
__SCREAMING_SNAKE_CASE = torchvision.transforms.Resize(2_2_4)
__SCREAMING_SNAKE_CASE = torchvision.transforms.CenterCrop(2_2_4)
    def preprocess_img( self , images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images
    def __call__( self , text=None , images=None , **kwargs):
        encoding = self.tokenizer(text=text , **kwargs)
        encoding["""pixel_values"""] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP ( nn.Module ):
"""simple docstring"""
    def __init__( self , iterations=1_0 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="image" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
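    # Generation strategy: optimize a small additive offset on the VQGAN latent so the
    # decoded image moves toward the positive CLIP prompts and away from the negative ones.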
    def make_animation( self , input_path=None , output_path=None , total_duration=5 , extend_frames=True):
        images = []
        if output_path is None:
            output_path = """./animation.gif"""
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + """/*"""))
        if not len(paths):
            raise ValueError(
                """No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
                """ function?)""")
        if len(paths) == 1:
            print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(""".png"""):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path , images , duration=durations)
print(f"gif saved to {output_path}")
    def _get_latent( self , path=None , img=None):
        if not (path or img):
            raise ValueError("""Input either path or tensor""")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path) , target_image_size=2_5_6).to(self.device)
        img = preprocess_vqgan(img)
        z , *_ = self.vqgan.encode(img)
        return z
    def _add_vector( self , transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity( self , prompts , image , weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors="""pt""" , padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self , pos_prompts , neg_prompts , image):
        pos_logits = self._get_clip_similarity(pos_prompts["""prompts"""] , image , weights=(1 / pos_prompts["""weights"""]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["""prompts"""] , image , weights=neg_prompts["""weights"""])
        else:
            neg_logits = torch.tensor([1] , device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
return loss
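    # Optimization loop: a random latent offset is refined with Adam; each step decodes the
    # shifted latent, post-processes it, and backpropagates the CLIP loss defined above.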
    def _optimize_CLIP( self , original_img , pos_prompts , neg_prompts):
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device)
        optim = torch.optim.Adam([vector] , lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img)
            print("""CLIP loss""" , clip_loss)
if self.log:
wandb.log({"""CLIP Loss""": clip_loss})
            clip_loss.backward(retain_graph=True)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
    def _init_logging( self , positive_prompts , negative_prompts , image_path):
        wandb.init(reinit=True , project="""face-editor""")
wandb.config.update({"""Positive Prompts""": positive_prompts})
wandb.config.update({"""Negative Prompts""": negative_prompts})
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations})
if image_path:
            image = Image.open(image_path)
            image = image.resize((2_5_6, 2_5_6))
            wandb.log("""Original Image""" , wandb.Image(image))
    def process_prompts( self , prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str):
            prompts = [prompt.strip() for prompt in prompts.split("""|""")]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(""":""")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device),
        }
    def generate( self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device)
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + """_""" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("""Original Image""")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"""Image""": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}_final.png"))
| 100 |
"""simple docstring"""
def solution( n = 100 ):
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_input_output_texts( self , tokenizer ):
"""simple docstring"""
return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
"""simple docstring"""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding( self , max_length=15 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="""max_length""" , )
    def test_padding_different_model( self ):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy ( OpenAIGPTTokenizationTest ):
'''simple docstring'''
pass
| 168 |
"""simple docstring"""
import math
import random
def sigmoid_function( value , deriv = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
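# Note: when deriv=True the argument is expected to already be a sigmoid output s,
# since d/dx sigmoid(x) = s * (1 - s).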
# Initial Value
INITIAL_VALUE = 0.0_2
def forward_propagation( expected , number_propagations ) -> float:
    weight = float(2 * (random.randint(1 , 1_00 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
        layer_1_error = (expected / 1_00) - layer_a
# Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("""Expected value: """))
    number_propagations = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 168 | 1 |
from __future__ import annotations
def generate_all_combinations( n, k ):
    """simple docstring"""
    result = []
    create_all_state(1, n, k, [], result )
    return result
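# Backtracking: create_all_state extends current_list with each candidate >= increment,
# recurses with one fewer level to fill, and pops the candidate on the way back out.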
def create_all_state( increment, total_number, level, current_list, total_list, ):
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment, total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1, total_number, level - 1, current_list, total_list )
        current_list.pop()
def print_all_state( total_list ):
    """simple docstring"""
    for i in total_list:
        print(*i )
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
| 334 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool ( PipelineTool ):
_UpperCamelCase : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
_UpperCamelCase : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_UpperCamelCase : Optional[int] = "document_qa"
_UpperCamelCase : Any = AutoProcessor
_UpperCamelCase : Union[str, Any] = VisionEncoderDecoderModel
_UpperCamelCase : Union[str, Any] = ["image", "text"]
_UpperCamelCase : List[str] = ["text"]
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ):
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        prompt = task_prompt.replace("""{user_input}""" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="""pt""" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="""pt""" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
        sequence = re.sub(r"""<.*?>""" , """""" , sequence , count=1 ).strip() # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 44 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _a ( _SCREAMING_SNAKE_CASE="" ) -> str:
snake_case_ = tempfile.mkdtemp()
return os.path.join(_SCREAMING_SNAKE_CASE , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests (unittest.TestCase):
'''simple docstring'''
    def test_from_tensor( self ):
        """simple docstring"""
        tensor = torch.rand(12 , dtype=torch.float32 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor , _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
    def test_from_string( self ):
        """simple docstring"""
        tensor = torch.rand(12 , dtype=torch.float32 ) - 0.5
        path = get_new_path(suffix=""".wav""" )
        sf.write(path , tensor , 16_000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class AgentImageTests (unittest.TestCase):
'''simple docstring'''
    def test_from_tensor( self ):
        """simple docstring"""
        tensor = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_string( self ):
        """simple docstring"""
        path = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_image( self ):
        """simple docstring"""
        path = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class AgentTextTests (unittest.TestCase):
'''simple docstring'''
    def test_from_string( self ):
        """simple docstring"""
        string = """Hey!"""
        agent_type = AgentText(string )
        self.assertEqual(string , agent_type.to_string() )
        self.assertEqual(string , agent_type.to_raw() )
        self.assertEqual(string , agent_type )
| 233 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
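# Typical invocation (a sketch; assumes an `accelerate config` with a DeepSpeed plugin has
# already been created, and `<script>` stands for this file's real name):
#   accelerate launch <script>.py --model_name_or_path bert-base-cased --output_dir outputs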
def get_dataloaders( accelerator , batch_size = 16 , model_name = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("""glue""" , """mrpc""" )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions , references = accelerator.gather(
                (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
        performance_metric[f"""epoch-{epoch}"""] = eval_metric["""accuracy"""]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["""accuracy"""]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
            json.dump(performance_metric , f )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--performance_lower_bound""" , type=float , default=None , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=3 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 233 | 1 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = 'base_with_context'
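# Note: each load_* helper below walks one subtree of the T5X checkpoint (token encoder,
# continuous encoder, decoder), wrapping every converted array in an nn.Parameter, and
# returns the corresponding torch model.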
def load_notes_encoder( weights, model ):
'''simple docstring'''
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"""layers_{lyr_num}"""]
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight["attention"]
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights, model ):
'''simple docstring'''
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"""layers_{lyr_num}"""]
        attention_weights = ly_weight["attention"]
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights, model ):
'''simple docstring'''
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"""layers_{lyr_num}"""]
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        attention_weights = ly_weight["self_attention"]
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ):
    '''simple docstring'''
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint )
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
| 65 |
from __future__ import annotations
from collections import deque
class Automaton :
    def __init__(self : Dict , keywords : list[str] ) -> None:
        """simple docstring"""
        self.adlist : list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
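    # Each adlist entry is one trie node: "value" is the incoming character,
    # "next_states" the child node indices, "fail_state" the Aho-Corasick failure link,
    # and "output" the keywords that end at (or fail into) this node.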
    def find_next_state (self : Tuple , current_state : int , char : str ) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
    def add_keyword (self : Dict , keyword : str ) -> None:
        """simple docstring"""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions (self : Optional[int] ) -> None:
        """simple docstring"""
        q : deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state , self.adlist[child]["value"] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
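    # Scan the text once; on a mismatch follow fail links instead of restarting, and
    # record, for every matched keyword, the index where that occurrence starts.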
    def search_in (self : Union[str, Any] , string : str ) -> dict[str, list[int]]:
        """simple docstring"""
        result : dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
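# Game-tree evaluation: scores live at the leaves of a complete binary tree of the given
# height; maximizer and minimizer alternate levels, so each recursive call flips is_max.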
def minimax( depth : int ,node_index : int ,is_max : bool ,scores : list[int] ,height : float ):
    '''simple docstring'''
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if not scores:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 ,node_index * 2 ,False ,scores ,height ) ,minimax(depth + 1 ,node_index * 2 + 1 ,False ,scores ,height ) ,)
        if is_max
        else min(
            minimax(depth + 1 ,node_index * 2 ,True ,scores ,height ) ,minimax(depth + 1 ,node_index * 2 + 1 ,True ,scores ,height ) ,)
    )
def main():
    '''simple docstring'''
    scores = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
    height = math.log(len(scores ) ,2 )
    print(f'Optimal value : {minimax(0 ,0 ,True ,scores ,height )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 353 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'''\[([^\]]+)\]''')
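# Utility for keeping the `_import_structure` blocks of diffusers' __init__ files sorted:
# the code is split into indented blocks, and object lists inside brackets are reordered
# (constants first, then classes, then functions).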
def get_indent( line ):
    '''simple docstring'''
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def A_ ( snake_case_ : Union[str, Any] ,snake_case_ : Dict="" ,snake_case_ : Dict=None ,snake_case_ : Any=None ):
'''simple docstring'''
UpperCamelCase : Optional[int] = 0
UpperCamelCase : List[Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(snake_case_ ):
index += 1
UpperCamelCase : Optional[Any] = ["""\n""".join(lines[:index] )]
else:
UpperCamelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase : Any = [lines[index]]
index += 1
while index < len(snake_case_ ) and (end_prompt is None or not lines[index].startswith(snake_case_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(snake_case_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(snake_case_ ) )
if index < len(snake_case_ ) - 1:
UpperCamelCase : Any = [lines[index + 1]]
index += 1
else:
UpperCamelCase : List[str] = []
else:
blocks.append("""\n""".join(snake_case_ ) )
UpperCamelCase : int = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(snake_case_ ) > 0:
blocks.append("""\n""".join(snake_case_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(snake_case_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def A_ ( snake_case_ : Optional[Any] ):
'''simple docstring'''
def _inner(snake_case_ : Tuple ):
return key(snake_case_ ).lower().replace("""_""" ,"""""" )
return _inner
def A_ ( snake_case_ : List[Any] ,snake_case_ : Optional[int]=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(snake_case_ : Dict ):
return x
if key is None:
UpperCamelCase : int = noop
# Constants are all uppercase, they go first.
UpperCamelCase : List[Any] = [obj for obj in objects if key(snake_case_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCamelCase : str = [obj for obj in objects if key(snake_case_ )[0].isupper() and not key(snake_case_ ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCamelCase : List[str] = [obj for obj in objects if not key(snake_case_ )[0].isupper()]
UpperCamelCase : Tuple = ignore_underscore(snake_case_ )
return sorted(snake_case_ ,key=snake_case_ ) + sorted(snake_case_ ,key=snake_case_ ) + sorted(snake_case_ ,key=snake_case_ )
def A_ ( snake_case_ : int ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(snake_case_ : List[Any] ):
UpperCamelCase : Any = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
UpperCamelCase : Union[str, Any] = [part.strip().replace("""\"""" ,"""""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase : List[str] = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(snake_case_ )] ) + "]"
UpperCamelCase : str = import_statement.split("""\n""" )
if len(snake_case_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCamelCase : str = 2 if lines[1].strip() == """[""" else 1
UpperCamelCase : Dict = [(i, _re_strip_line.search(snake_case_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCamelCase : int = sort_objects(snake_case_ ,key=lambda snake_case_ : x[1] )
UpperCamelCase : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(snake_case_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCamelCase : List[Any] = _re_bracket_content.sub(_replace ,lines[1] )
else:
UpperCamelCase : Optional[Any] = [part.strip().replace("""\"""" ,"""""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase : List[Any] = keys[:-1]
UpperCamelCase : int = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(snake_case_ )] )
return "\n".join(snake_case_ )
else:
# Finally we have to deal with imports fitting on one line
UpperCamelCase : List[str] = _re_bracket_content.sub(_replace ,snake_case_ )
return import_statement
def A_ ( snake_case_ : Tuple ,snake_case_ : str=True ):
'''simple docstring'''
with open(snake_case_ ,"""r""" ) as f:
UpperCamelCase : int = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCamelCase : Dict = split_code_in_indented_blocks(
snake_case_ ,start_prompt="""_import_structure = {""" ,end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 ,len(snake_case_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCamelCase : Optional[Any] = main_blocks[block_idx]
UpperCamelCase : Optional[int] = block.split("""\n""" )
# Get to the start of the imports.
UpperCamelCase : Union[str, Any] = 0
while line_idx < len(snake_case_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCamelCase : List[str] = len(snake_case_ )
else:
line_idx += 1
if line_idx >= len(snake_case_ ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCamelCase : Dict = """\n""".join(block_lines[line_idx:-1] )
UpperCamelCase : Union[str, Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
UpperCamelCase : Optional[int] = split_code_in_indented_blocks(snake_case_ ,indent_level=snake_case_ )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCamelCase : Union[str, Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCamelCase : Union[str, Any] = [(pattern.search(snake_case_ ).groups()[0] if pattern.search(snake_case_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCamelCase : Optional[Any] = [(i, key) for i, key in enumerate(snake_case_ ) if key is not None]
UpperCamelCase : List[Any] = [x[0] for x in sorted(snake_case_ ,key=lambda snake_case_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCamelCase : str = 0
UpperCamelCase : List[Any] = []
for i in range(len(snake_case_ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCamelCase : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(snake_case_ )
count += 1
# And we put our main block back together with its first and last line.
UpperCamelCase : Tuple = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(snake_case_ ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(snake_case_ ,"""w""" ) as f:
f.write("""\n""".join(snake_case_ ) )
def A_ ( snake_case_ : int=True ):
'''simple docstring'''
UpperCamelCase : Any = []
for root, _, files in os.walk(snake_case_ ):
if "__init__.py" in files:
UpperCamelCase : Union[str, Any] = sort_imports(os.path.join(snake_case_ ,"""__init__.py""" ) ,check_only=snake_case_ )
if result:
UpperCamelCase : Any = [os.path.join(snake_case_ ,"""__init__.py""" )]
if len(snake_case_ ) > 0:
raise ValueError(f'Would overwrite {len(snake_case_ )} files, run `make style`.' )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__A : str = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
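# Minimal sketch of the effect on a single (hypothetical) flattened entry:
# constants sort first, then classes, then functions, each case-insensitively
# and ignoring underscores.
#
#     sort_objects_in_import('_import_structure["models"] = ["ZModel", "AModel", "SOME_CONSTANT", "helper_fn"]')
#     # -> '_import_structure["models"] = ["SOME_CONSTANT", "AModel", "ZModel", "helper_fn"]'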
| 27 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
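# Hedged note on the check above: `loss` is the mean cross-entropy per target
# token, so -(labels.shape[-1] * loss) recovers the total log-likelihood of the
# target sequence, which is the quantity the original Mesh-TensorFlow T5
# evaluation reports (hence the name `mtf_score`).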
| 348 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 348 | 1 |
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count the reflections of the beam inside the white cell before it exits."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 300 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor_text(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 300 | 1 |
# Min heap data structure with decrease-key functionality, in O(log(n)) time.


class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 55 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration for I-BERT (integer-only, quantization-aware RoBERTa)."""

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
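# Minimal usage sketch (assumes only the classes above; not tied to any checkpoint):
#
#     config = IBertConfig(quant_mode=True)
#     print(config.model_type, config.quant_mode, config.hidden_size)  # ibert True 768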
| 145 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(
    arr: Sequence[float], low: int, high: int
) -> tuple[int | None, int | None, float]:
    """Find the indices and sum of the maximum-sum subarray of arr[low..high]."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
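# Quick correctness check (the classic example; indices are inclusive):
#
#     max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
#     # -> (3, 6, 6): the subarray [4, -1, 2, 1] at indices 3..6 sums to 6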
| 350 |
'''simple docstring'''
from __future__ import annotations


def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return (numerator, denominator) of the fraction equivalent to `decimal`."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor (Euclid's algorithm).
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"{decimal_to_fraction(2) = }")
print(f"{decimal_to_fraction(89.0) = }")
print(f"{decimal_to_fraction('67') = }")
print(f"{decimal_to_fraction('45.0') = }")
print(f"{decimal_to_fraction(1.5) = }")
print(f"{decimal_to_fraction('6.25') = }")
print(f"{decimal_to_fraction('78td') = }")
| 37 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    # We will verify our results on an image of cute cats.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original Conditional DETR weights into our structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # re-nest the plain "conditional_detr*" keys under "conditional_detr.model"
                # (the exact slice was lost in this copy and is reconstructed here)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 141 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
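# Hedged note: `token2json` inverts Donut's serialization of structured fields,
# turning XML-like tag pairs (`<s_key>value</s_key>`) into JSON keys and treating
# `<sep/>`-separated repeated groups as lists, as the `nicknames` field shows.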
| 2 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """
        If `labels` is None, returns log probabilities over the vocabulary;
        otherwise returns the per-token negative log likelihood of the
        (shifted) labels.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    # `[:, None]` added so the cluster log-prob broadcasts over the tail vocabulary
                    logprob_i = head_logprob[:, -i, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
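# Illustrative shape check (a sketch with made-up sizes, not tied to any model;
# the projection parameters are uninitialized here, so the values are meaningless):
#
#     softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=2)
#     log_probs = softmax.log_prob(torch.randn(8, 64))
#     log_probs.shape  # torch.Size([8, 1000])
#
#     nll = softmax(torch.randn(2, 5, 64), labels=torch.randint(0, 1000, (2, 5)))
#     nll.shape        # torch.Size([8]): per-token NLL of the shifted targets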
| 45 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
_UpperCAmelCase : Tuple = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_UpperCAmelCase : List[str] = """UperNetConfig"""
class lowerCAmelCase ( nn.Module ):
def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[int, Tuple[int, int]] , UpperCAmelCase : Union[int, Tuple[int, int], str] = 0 , UpperCAmelCase : bool = False , UpperCAmelCase : Union[int, Tuple[int, int]] = 1 , ) -> None:
super().__init__()
lowerCamelCase__ : Any = nn.Convad(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , bias=UpperCAmelCase , dilation=UpperCAmelCase , )
lowerCamelCase__ : str = nn.BatchNormad(UpperCAmelCase )
lowerCamelCase__ : Tuple = nn.ReLU()
def A_ ( self : Tuple , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
lowerCamelCase__ : Tuple = self.conv(UpperCAmelCase )
lowerCamelCase__ : int = self.batch_norm(UpperCAmelCase )
lowerCamelCase__ : List[Any] = self.activation(UpperCAmelCase )
return output
class lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ) -> None:
super().__init__()
lowerCamelCase__ : int = [
nn.AdaptiveAvgPoolad(UpperCAmelCase ),
UperNetConvModule(UpperCAmelCase , UpperCAmelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(UpperCAmelCase ) , UpperCAmelCase )
def A_ ( self : Union[str, Any] , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
lowerCamelCase__ : Dict = input
for layer in self.layers:
lowerCamelCase__ : Tuple = layer(UpperCAmelCase )
return hidden_state
class lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase : Tuple[int, ...] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : bool ) -> None:
super().__init__()
lowerCamelCase__ : int = pool_scales
lowerCamelCase__ : Tuple = align_corners
lowerCamelCase__ : Union[str, Any] = in_channels
lowerCamelCase__ : List[Any] = channels
lowerCamelCase__ : Tuple = []
for i, pool_scale in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Dict = UperNetPyramidPoolingBlock(pool_scale=UpperCAmelCase , in_channels=UpperCAmelCase , channels=UpperCAmelCase )
self.blocks.append(UpperCAmelCase )
self.add_module(str(UpperCAmelCase ) , UpperCAmelCase )
def A_ ( self : Optional[int] , UpperCAmelCase : torch.Tensor ) -> List[torch.Tensor]:
lowerCamelCase__ : Tuple = []
for ppm in self.blocks:
lowerCamelCase__ : Union[str, Any] = ppm(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = nn.functional.interpolate(
UpperCAmelCase , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(UpperCAmelCase )
return ppm_outs
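# A minimal standalone sketch of the pyramid-pooling idea implemented above, written
# with plain torch ops so it does not depend on the obfuscated class names in this
# file: pool the feature map to several grid sizes, then upsample each result back
# to the input resolution before concatenation.
def _pyramid_pooling_sketch(features: torch.Tensor, pool_scales=(1, 2, 3, 6)) -> list:
    outs = []
    for scale in pool_scales:
        pooled = nn.functional.adaptive_avg_pool2d(features, scale)  # (N, C, scale, scale)
        outs.append(
            nn.functional.interpolate(pooled, size=features.shape[2:], mode="bilinear", align_corners=False)
        )
    return outs  # one (N, C, H, W) tensor per scale, ready to concatenate with `features`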
class lowerCAmelCase ( nn.Module ):
def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : Any ) -> int:
super().__init__()
lowerCamelCase__ : Tuple = config
lowerCamelCase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowerCamelCase__ : List[Any] = in_channels
lowerCamelCase__ : Optional[int] = config.hidden_size
lowerCamelCase__ : Dict = False
lowerCamelCase__ : List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowerCamelCase__ : Tuple = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowerCamelCase__ : int = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowerCamelCase__ : str = nn.ModuleList()
lowerCamelCase__ : str = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowerCamelCase__ : str = UperNetConvModule(UpperCAmelCase , self.channels , kernel_size=1 )
lowerCamelCase__ : int = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(UpperCAmelCase )
self.fpn_convs.append(UpperCAmelCase )
lowerCamelCase__ : List[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def A_ ( self : Tuple ) -> List[Any]:
self.apply(self._init_weights )
def A_ ( self : Tuple , UpperCAmelCase : Dict ) -> str:
if isinstance(UpperCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> Optional[int]:
lowerCamelCase__ : str = inputs[-1]
lowerCamelCase__ : List[str] = [x]
psp_outs.extend(self.psp_modules(UpperCAmelCase ) )
lowerCamelCase__ : Tuple = torch.cat(UpperCAmelCase , dim=1 )
lowerCamelCase__ : Optional[Any] = self.bottleneck(UpperCAmelCase )
return output
def A_ ( self : str , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
# build laterals
lowerCamelCase__ : Union[str, Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(UpperCAmelCase ) )
# build top-down path
lowerCamelCase__ : Tuple = len(UpperCAmelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCamelCase__ : Optional[Any] = laterals[i - 1].shape[2:]
lowerCamelCase__ : Any = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=UpperCAmelCase , mode='bilinear' , align_corners=self.align_corners )
# build outputs
lowerCamelCase__ : Any = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCamelCase__ : Optional[Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
lowerCamelCase__ : Dict = torch.cat(UpperCAmelCase , dim=1 )
lowerCamelCase__ : List[str] = self.fpn_bottleneck(UpperCAmelCase )
lowerCamelCase__ : int = self.classifier(UpperCAmelCase )
return output
class lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 3 , UpperCAmelCase : Union[int, Tuple[int, int]] = 1 ) -> None:
super().__init__()
lowerCamelCase__ : Any = config
lowerCamelCase__ : Optional[Any] = config.auxiliary_in_channels
lowerCamelCase__ : str = config.auxiliary_channels
lowerCamelCase__ : Optional[Any] = config.auxiliary_num_convs
lowerCamelCase__ : str = config.auxiliary_concat_input
lowerCamelCase__ : List[Any] = in_index
lowerCamelCase__ : List[str] = (kernel_size // 2) * dilation
lowerCamelCase__ : Optional[int] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , dilation=UpperCAmelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , dilation=UpperCAmelCase ) )
if self.num_convs == 0:
lowerCamelCase__ : Optional[Any] = nn.Identity()
else:
lowerCamelCase__ : Optional[Any] = nn.Sequential(*UpperCAmelCase )
if self.concat_input:
lowerCamelCase__ : Any = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=UpperCAmelCase , padding=kernel_size // 2 )
lowerCamelCase__ : Dict = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def A_ ( self : Tuple ) -> Tuple:
self.apply(self._init_weights )
def A_ ( self : Union[str, Any] , UpperCAmelCase : List[Any] ) -> List[str]:
if isinstance(UpperCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def A_ ( self : Tuple , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
# just take the relevant feature maps
lowerCamelCase__ : str = encoder_hidden_states[self.in_index]
lowerCamelCase__ : Union[str, Any] = self.convs(UpperCAmelCase )
if self.concat_input:
lowerCamelCase__ : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowerCamelCase__ : Optional[int] = self.classifier(UpperCAmelCase )
return output
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = UperNetConfig
UpperCAmelCase__ = """pixel_values"""
UpperCAmelCase__ = True
def A_ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def A_ ( self : str ) -> Tuple:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def A_ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]=False ) -> str:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : Any = value
_UpperCAmelCase : List[Any] = R"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase : Union[str, Any] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""", __UpperCamelCase, )
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any] ) -> Dict:
super().__init__(UpperCAmelCase )
lowerCamelCase__ : List[str] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowerCamelCase__ : List[Any] = UperNetHead(UpperCAmelCase , in_channels=self.backbone.channels )
lowerCamelCase__ : int = UperNetFCNHead(UpperCAmelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def A_ ( self : Union[str, Any] , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
lowerCamelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ : str = output_attentions if output_attentions is not None else self.config.output_attentions
lowerCamelCase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , output_attentions=UpperCAmelCase )
lowerCamelCase__ : List[str] = outputs.feature_maps
lowerCamelCase__ : Optional[int] = self.decode_head(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = nn.functional.interpolate(UpperCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=UpperCAmelCase )
lowerCamelCase__ : List[str] = None
if self.auxiliary_head is not None:
lowerCamelCase__ : List[Any] = self.auxiliary_head(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = nn.functional.interpolate(
UpperCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=UpperCAmelCase )
lowerCamelCase__ : List[str] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
lowerCamelCase__ : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowerCamelCase__ : str = loss_fct(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = loss_fct(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowerCamelCase__ : List[str] = (logits,) + outputs[1:]
else:
lowerCamelCase__ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
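# A minimal inference sketch for the model defined above, assuming this file is the
# transformers UperNet implementation; the checkpoint name comes from the archive
# list at the top of the file.
if __name__ == "__main__":
    from transformers import UperNetForSemanticSegmentation

    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    pixel_values = torch.randn(1, 3, 512, 512)  # dummy batch; real pipelines use an image processor
    with torch.no_grad():
        outputs = model(pixel_values=pixel_values)
    # forward() upsamples the decode-head output back to the input resolution
    print(outputs.logits.shape)  # torch.Size([1, num_labels, 512, 512])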
| 45 | 1 |
class lowercase__ :
def __init__( self : Optional[Any] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : Union[str, Any] = [0] * size
SCREAMING_SNAKE_CASE : List[str] = [0] * size
@staticmethod
def __A ( UpperCamelCase__ : int ):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def __A ( UpperCamelCase__ : int ):
'''simple docstring'''
return (index & (index + 1)) - 1
def __A ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = value
while index < self.size:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_prev(UpperCamelCase__ ) + 1
if current_left_border == index:
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
SCREAMING_SNAKE_CASE : Dict = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_next(UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
        right -= 1 # Because right is exclusive
SCREAMING_SNAKE_CASE : str = 0
while left <= right:
SCREAMING_SNAKE_CASE : List[str] = self.get_prev(UpperCamelCase__ )
if left <= current_left:
SCREAMING_SNAKE_CASE : Optional[Any] = max(UpperCamelCase__ , self.tree[right] )
SCREAMING_SNAKE_CASE : int = current_left
else:
SCREAMING_SNAKE_CASE : Any = max(UpperCamelCase__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
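# A standalone sketch of the two index tricks the class above relies on (helper names
# are illustrative): get_next(i) = i | (i + 1) walks to the next node whose block
# covers i, and get_prev(i) = (i & (i + 1)) - 1 jumps just past the block ending at i.
def get_next(index: int) -> int:
    return index | (index + 1)

def get_prev(index: int) -> int:
    return (index & (index + 1)) - 1

# Node 5 (0b101) covers the block [get_prev(5) + 1, 5] = [4, 5]:
assert get_prev(5) + 1 == 4
# An update at position 2 touches nodes 2 -> 3 -> 7 -> 15 until the size is exceeded:
path, i = [], 2
while i < 16:
    path.append(i)
    i = get_next(i)
assert path == [2, 3, 7, 15]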
| 182 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def A ( _lowercase ):
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE : int = version.parse(accelerate.__version__ ).base_version
if version.parse(_lowercase ) < version.parse('''0.17.0''' ):
return method
def wrapper(self , *_lowercase , **_lowercase ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *_lowercase , **_lowercase )
return wrapper
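# A short usage sketch of the guard above (named apply_forward_hook in the real
# diffusers source; here it is bound to the obfuscated name `A`): without accelerate,
# or on a module that was never dispatched with an accelerate hook, the call passes
# straight through to the decorated method.
class _TinyModule:
    @A
    def encode(self, x):
        return x * 2

print(_TinyModule().encode(3))  # 6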
| 182 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCAmelCase__ = Features({"audio": Audio()} )
lowerCAmelCase__ = Features({"labels": ClassLabel} )
lowerCAmelCase__ = "audio"
lowerCAmelCase__ = "labels"
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , UpperCAmelCase ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
lowercase_ = copy.deepcopy(self )
lowercase_ = self.label_schema.copy()
lowercase_ = features[self.label_column]
lowercase_ = label_schema
return task_template
@property
def A__ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
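# A minimal sketch of what the alignment method above does, assuming this is datasets'
# AudioClassification task template: copy the dataset's concrete ClassLabel into the
# template's placeholder label schema (feature names are illustrative).
if __name__ == "__main__":
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
    label_schema = Features({"labels": features["labels"]})  # the placeholder ClassLabel becomes concrete
    print(label_schema["labels"].names)  # ['cat', 'dog']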
| 355 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase_ = 1.0 if scale is None else scale
lowercase_ = 0.0 if loc is None else loc
super().__init__(UpperCAmelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCAmelCase )] )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = args_dim
lowercase_ = nn.ModuleList([nn.Linear(UpperCAmelCase , UpperCAmelCase ) for dim in args_dim.values()] )
lowercase_ = domain_map
def A__ ( self , UpperCAmelCase ) -> Tuple[torch.Tensor]:
'''simple docstring'''
lowercase_ = [proj(UpperCAmelCase ) for proj in self.proj]
return self.domain_map(*UpperCAmelCase )
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase_ = function
def A__ ( self , UpperCAmelCase , *UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.function(UpperCAmelCase , *UpperCAmelCase )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase = 1 ) -> None:
'''simple docstring'''
lowercase_ = dim
lowercase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*UpperCAmelCase )
else:
return Independent(self.distribution_class(*UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Distribution:
'''simple docstring'''
lowercase_ = self._base_distribution(UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCAmelCase , loc=UpperCAmelCase , scale=UpperCAmelCase , event_dim=self.event_dim )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 0.0
def A__ ( self , UpperCAmelCase ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def A__ ( self , *UpperCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def A__ ( UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(UpperCAmelCase ) + 4.0 )) / 2.0
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase__ = StudentT
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ = 2.0 + cls.squareplus(UpperCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"loc": 1, "scale": 1}
lowerCAmelCase__ = Normal
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"total_count": 1, "logits": 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def A__ ( self , UpperCAmelCase ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase )
else:
return Independent(self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
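# A standalone sketch of the parameter-mapping pattern above, using torch.distributions
# directly (the obfuscated class names in this file collide, so nothing here calls
# them): squareplus keeps scale strictly positive, and the affine wrapper rescales the
# base distribution as loc + scale * sample.
if __name__ == "__main__":
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

    raw_df, raw_loc, raw_scale = torch.randn(3, 2)
    df = 2.0 + squareplus(raw_df)  # degrees of freedom kept above 2, as in domain_map
    scale = squareplus(raw_scale).clamp_min(torch.finfo(raw_scale.dtype).eps)
    base = StudentT(df, raw_loc, scale)
    rescaled = TransformedDistribution(base, [AffineTransform(loc=1.0, scale=10.0)])
    print(rescaled.sample().shape)  # torch.Size([2])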
| 297 | 0 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def __lowerCAmelCase (_UpperCamelCase = "https://www.worldometers.info/coronavirus" ):
__lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_UpperCamelCase ).text , 'html.parser' )
__lowerCAmelCase : str = soup.findAll('h1' )
__lowerCAmelCase : str = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_UpperCamelCase , _UpperCamelCase )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'{key}\n{value}\n')
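    # A small offline check of the same findAll/zip pattern on canned HTML, so the
    # parsing can be exercised without a network call (BeautifulSoup is imported above
    # from the obfuscated module name `bsa`; the real package is bs4):
    soup = BeautifulSoup("<h1>Cases:</h1><div class='maincounter-number'>1,000</div>", "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    print({k.text.strip(): v.text.strip() for k, v in zip(keys, values)})  # {'Cases:': '1,000'}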
| 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
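# A minimal standalone sketch of the _LazyModule behaviour used above (toy names):
# attributes listed in the import structure are resolved only on first access, so
# importing the package stays cheap even when the heavy submodules are installed.
if __name__ == "__main__":
    import importlib
    import types

    class _LazySketch(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}

        def __getattr__(self, attr):
            module = importlib.import_module(self._attr_to_module[attr])  # imported on first use
            return getattr(module, attr)

    lazy = _LazySketch("lazy_sketch", {"math": ["sqrt"]})
    print(lazy.sqrt(9.0))  # 3.0 -- `math` was only imported at this call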
| 86 | 1 |
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE : Tuple = "docs/source/en/_toctree.yml"
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
_lowercase : Tuple = defaultdict(lowerCamelCase_ )
_lowercase : int = []
_lowercase : str = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(lowerCamelCase_ )
_lowercase : Optional[int] = new_doc_list
_lowercase : str = [key for key, value in counts.items() if value > 1]
_lowercase : Union[str, Any] = []
for duplicate_key in duplicates:
_lowercase : Optional[Any] = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(lowerCamelCase_ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    _lowercase : Optional[int] = sorted(lowerCamelCase_ , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCamelCase_ ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(lowerCamelCase_ )
# Sort
return overview_doc
def UpperCamelCase_( lowerCamelCase_=False ) -> Any:
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_lowercase : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
_lowercase : Tuple = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowercase : Tuple = content[api_idx]['sections']
# Then to the model doc
_lowercase : int = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowercase : Optional[int] = api_doc[scheduler_idx]['sections']
_lowercase : List[Any] = clean_doc_toc(lowerCamelCase_ )
_lowercase : Optional[Any] = False
if new_scheduler_doc != scheduler_doc:
_lowercase : Optional[Any] = True
if overwrite:
_lowercase : str = new_scheduler_doc
if diff:
if overwrite:
_lowercase : Any = api_doc
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def UpperCamelCase_( lowerCamelCase_=False ) -> List[str]:
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_lowercase : int = yaml.safe_load(f.read() )
# Get to the API doc
_lowercase : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowercase : str = content[api_idx]['sections']
# Then to the model doc
_lowercase : Tuple = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowercase : Tuple = False
_lowercase : Dict = api_doc[pipeline_idx]['sections']
_lowercase : Optional[Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowercase : Union[str, Any] = pipeline_doc['section']
_lowercase : List[str] = clean_doc_toc(lowerCamelCase_ )
if overwrite:
_lowercase : str = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCamelCase_ )
# sort overall pipeline doc
_lowercase : int = clean_doc_toc(lowerCamelCase_ )
if new_pipeline_docs != pipeline_docs:
_lowercase : Tuple = True
if overwrite:
_lowercase : str = new_pipeline_docs
if diff:
if overwrite:
_lowercase : List[str] = api_doc
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
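    # A simplified restatement of what the cleaning function above does to one section
    # list (entries illustrative): dedupe by `local`, keep 'Overview' first, and sort
    # the rest by title.
    toc = [
        {"local": "zeta", "title": "Zeta"},
        {"local": "overview", "title": "Overview"},
        {"local": "alpha", "title": "Alpha"},
        {"local": "alpha", "title": "Alpha"},  # duplicates with a single title are merged
    ]
    seen, deduped, overview = set(), [], []
    for doc in toc:
        if doc["title"].lower() == "overview":
            overview.append(doc)
        elif doc["local"] not in seen:
            seen.add(doc["local"])
            deduped.append(doc)
    print([d["local"] for d in overview + sorted(deduped, key=lambda s: s["title"].lower())])
    # ['overview', 'alpha', 'zeta']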
| 84 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
_lowercase : Any = TOKENIZER_CLASSES
else:
_lowercase : Tuple = {tokenizer_name: getattr(lowerCamelCase_ , tokenizer_name + 'Fast' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
_lowercase : Union[str, Any] = TOKENIZER_CLASSES[tokenizer_name]
_lowercase : Any = True
if checkpoint_name is None:
_lowercase : int = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_lowercase : List[Any] = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
_lowercase : Union[str, Any] = tokenizer_class.from_pretrained(lowerCamelCase_ , force_download=lowerCamelCase_ )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
_lowercase , _lowercase : str = checkpoint.split('/' )
_lowercase : Any = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
elif add_prefix:
_lowercase : Union[str, Any] = checkpoint
_lowercase : List[str] = dump_path
else:
_lowercase : str = None
_lowercase : Any = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_lowercase : Tuple = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_lowercase : List[Any] = file_path.split(lowerCamelCase_ )[-1][0]
if next_char == "/":
_lowercase : Any = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
_lowercase : Optional[Any] = tokenizer.save_pretrained(
lowerCamelCase_ , legacy_format=lowerCamelCase_ , filename_prefix=lowerCamelCase_ )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCamelCase_ )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
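# A typical invocation of the converter above (paths illustrative; the tokenizer name
# must be one of the SLOW_TO_FAST_CONVERTERS keys):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers \
#       --force_download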
| 84 | 1 |