| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
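Each row pairs a `code` sample with a `style_context` sample, records the style-cluster id of each (`code_codestyle`, `style_context_codestyle`), and a binary `label`. A minimal sketch of loading such a parquet split with the `datasets` library follows; the file path is a placeholder, since this dump does not name the dataset.

from datasets import load_dataset

# "data/*.parquet" is a hypothetical location; substitute the real files.
ds = load_dataset("parquet", data_files="data/*.parquet", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])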
"""simple docstring"""
__UpperCamelCase = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
TIMESTEPS_B = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
TIMESTEPS_C = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
TIMESTEPS_D = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
TIMESTEPS_E = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
TIMESTEPS_F = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
TIMESTEPS_G = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
TIMESTEPS_H = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 113 |
"""simple docstring"""
from math import isclose, sqrt
def lowercase (SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> tuple[float, float, float]:
SCREAMING_SNAKE_CASE = point_y / 4 / point_x
SCREAMING_SNAKE_CASE = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
SCREAMING_SNAKE_CASE = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
SCREAMING_SNAKE_CASE = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
SCREAMING_SNAKE_CASE = outgoing_gradient**2 + 4
SCREAMING_SNAKE_CASE = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
SCREAMING_SNAKE_CASE = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
SCREAMING_SNAKE_CASE = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
SCREAMING_SNAKE_CASE = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
SCREAMING_SNAKE_CASE = x_minus if isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else x_plus
SCREAMING_SNAKE_CASE = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowercase (SCREAMING_SNAKE_CASE_ : float = 1.4 , SCREAMING_SNAKE_CASE_ : float = -9.6 ) -> int:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = first_x_coord
SCREAMING_SNAKE_CASE = first_y_coord
SCREAMING_SNAKE_CASE = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next_point(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
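As a quick sanity check of next_point (an illustrative addition, not part of the original solution): the beam enters at (0.0, 10.1) and first strikes (1.4, -9.6), and every reflected point must stay on the ellipse y^2 + 4x^2 = 100.

incoming = (10.1 - -9.6) / (0.0 - 1.4)
x, y, _ = next_point(1.4, -9.6, incoming)
# The next point of incidence must satisfy the ellipse equation.
assert abs(4 * x * x + y * y - 100) < 1e-6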
| 113 | 1 |
"""simple docstring"""
from __future__ import annotations
class __UpperCAmelCase:
"""simple docstring"""
def __init__( self , snake_case__=None ):
'''simple docstring'''
lowercase__ : Union[str, Any]= data
lowercase__ : Optional[Any]= None
def __repr__( self ):
'''simple docstring'''
lowercase__ : str= []
lowercase__ : Tuple= self
while temp:
string_rep.append(F'''{temp.data}''' )
lowercase__ : Optional[int]= temp.next
return "->".join(snake_case__ )
def lowercase__(A ) ->Dict:
"""simple docstring"""
if not elements_list:
raise Exception("The Elements List is empty" )
lowercase__ : Optional[int]= Node(elements_list[0] )
for i in range(1 , len(A ) ):
lowercase__ : Optional[Any]= Node(elements_list[i] )
lowercase__ : str= current.next
return head
def lowercase__(A ) ->None:
"""simple docstring"""
if head_node is not None and isinstance(A , A ):
print_reverse(head_node.next )
print(head_node.data )
def lowercase__() ->str:
"""simple docstring"""
from doctest import testmod
testmod()
lowercase__ : Optional[int]= make_linked_list([14, 52, 14, 12, 43] )
print("Linked List:" )
print(A )
print("Elements in Reverse:" )
print_reverse(A )
if __name__ == "__main__":
main()
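The recursive print_reverse above uses one stack frame per node, so lists longer than Python's default recursion limit (about 1000 frames) would fail; the stack-based variant below, an illustrative addition rather than part of the original file, trades that for explicit O(n) space.

def print_reverse_iterative(head_node: Node) -> None:
    # Collect values front to back, then print them back to front.
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())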
| 150 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : List[Any] = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
a : Dict = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def lowercase__(A ) ->Optional[int]:
"""simple docstring"""
lowercase__ : Any= torch.load(A , map_location="cpu" )
return sd
def lowercase__(A , A , A=rename_keys_prefix ) ->List[str]:
"""simple docstring"""
lowercase__ : int= OrderedDict()
lowercase__ : Optional[Any]= torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowercase__ : Union[str, Any]= key
for name_pair in rename_keys_prefix:
lowercase__ : str= new_key.replace(name_pair[0] , name_pair[1] )
lowercase__ : Union[str, Any]= d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
lowercase__ : Optional[int]= new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def lowercase__(A , A ) ->str:
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
lowercase__ : Union[str, Any]= "pretraining"
if "vcr" in checkpoint_path:
lowercase__ : str= {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
lowercase__ : Optional[Any]= {"visual_embedding_dim": 2_048}
elif "vqa" in checkpoint_path:
lowercase__ : int= {"visual_embedding_dim": 2_048}
elif "nlvr" in checkpoint_path:
lowercase__ : Tuple= {"visual_embedding_dim": 1_024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
lowercase__ : int= {"visual_embedding_dim": 512}
lowercase__ : int= "multichoice"
elif "vqa_advanced" in checkpoint_path:
lowercase__ : Dict= {"visual_embedding_dim": 2_048}
lowercase__ : Optional[Any]= "vqa_advanced"
elif "vqa" in checkpoint_path:
lowercase__ : Optional[int]= {"visual_embedding_dim": 2_048, "num_labels": 3_129}
lowercase__ : List[str]= "vqa"
elif "nlvr" in checkpoint_path:
lowercase__ : Dict= {
"visual_embedding_dim": 1_024,
"num_labels": 2,
}
lowercase__ : Any= "nlvr"
lowercase__ : List[Any]= VisualBertConfig(**A )
# Load State Dict
lowercase__ : Union[str, Any]= load_state_dict(A )
lowercase__ : List[str]= get_new_dict(A , A )
if model_type == "pretraining":
lowercase__ : Optional[Any]= VisualBertForPreTraining(A )
elif model_type == "vqa":
lowercase__ : Any= VisualBertForQuestionAnswering(A )
elif model_type == "nlvr":
lowercase__ : Union[str, Any]= VisualBertForVisualReasoning(A )
elif model_type == "multichoice":
lowercase__ : str= VisualBertForMultipleChoice(A )
model.load_state_dict(A )
# Save Checkpoints
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
a : Dict = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
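The converter can also be invoked directly from Python; a minimal sketch with placeholder paths follows (the checkpoint's basename must be one of ACCEPTABLE_CHECKPOINTS).

# Hypothetical paths, for illustration only.
convert_visual_bert_checkpoint(
    "checkpoints/vqa_fine_tuned.th",  # basename must appear in ACCEPTABLE_CHECKPOINTS
    "converted/visualbert-vqa",       # folder passed to model.save_pretrained()
)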
| 150 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 19 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
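The upper bound of 1,000,000 is safe because the digit sum grows far slower than the number itself: a d-digit number is at least 10**(d-1), while its digit fifth-power sum is at most d * 9**5 = d * 59049, so no number with seven or more digits can equal its own sum. A one-line arithmetic check (illustrative, not part of the original solution):

# 7 * 9**5 = 413343 < 1_000_000, so the search can stop at six digits.
assert 7 * 9**5 < 10**6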
| 19 | 1 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
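For comparison, the same game tree can be evaluated with alpha-beta pruning, which skips subtrees that cannot change the result. The sketch below is an illustrative addition, not part of the original file; it reuses the math import above.

def minimax_alpha_beta(depth, node_index, is_max, scores, height, alpha=-math.inf, beta=math.inf):
    # Same leaf handling as minimax; internal nodes stop early once alpha >= beta.
    if depth == height:
        return scores[node_index]
    best = -math.inf if is_max else math.inf
    for child in (node_index * 2, node_index * 2 + 1):
        value = minimax_alpha_beta(depth + 1, child, not is_max, scores, height, alpha, beta)
        if is_max:
            best = max(best, value)
            alpha = max(alpha, best)
        else:
            best = min(best, value)
            beta = min(beta, best)
        if alpha >= beta:
            break  # the remaining child cannot affect the outcome
    return best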
| 75 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original FocalNet weights into the Transformers structure."""
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 75 | 1 |
"""Processor class (the obfuscated original is consistent with transformers' BLIP-2 processor)."""
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
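Assuming the class above is the BLIP-2 processor, a typical usage sketch follows; the checkpoint id is illustrative rather than taken from this file.

from PIL import Image
import requests
from transformers import Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="Question: how many cats are there? Answer:", return_tensors="pt")
print(list(inputs.keys()))  # pixel_values plus input_ids / attention_mask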
| 229 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
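The _import_structure plus _LazyModule pattern above defers importing the heavy torch/TF submodules until an attribute is first accessed. A minimal standalone sketch of the same idea (names invented for illustration; this is not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes from submodules on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value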
| 286 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 281 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# fmt: off
a :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
a :List[str] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
a :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :str = '''tester'''
a :Union[str, Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a :Any = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
a :str = tokenizer.encode([special_token] , add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
a :Tuple = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a , a :Tuple = self.get_input_output_texts(_lowerCamelCase )
a :Tuple = tokenizer.tokenize(_lowerCamelCase )
a :int = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
a :Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
a :Any = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertNotEqual(len(_lowerCamelCase ) , 0 )
a :str = tokenizer.decode(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _lowerCamelCase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
| 281 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a =None
a =None
if self.use_labels:
a =ids_tensor([self.batch_size] , self.num_labels )
a =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a =self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A ) -> Any:
a =MobileViTVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
a =model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A ) -> Dict:
a =self.num_labels
a =MobileViTVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
a =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A ) -> Tuple:
a =self.num_labels
a =MobileViTVaForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
a =model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
a =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =self.prepare_config_and_inputs()
a , a , a , a =config_and_inputs
a ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =MobileViTVaModelTester(self )
a =MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self ) -> str:
pass
def SCREAMING_SNAKE_CASE ( self ) -> int:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(__lowercase )
a =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a =[*signature.parameters.keys()]
a =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def SCREAMING_SNAKE_CASE ( self ) -> int:
def check_hidden_states_output(__A , __A , __A ):
a =model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__lowercase , __lowercase ) )
a =outputs.hidden_states
a =5
self.assertEqual(len(__lowercase ) , __lowercase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
a =2
for i in range(len(__lowercase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a =True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a =MobileViTVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
__lowercase )
a =self.default_image_processor
a =prepare_img()
a =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
a =model(**__lowercase )
# verify the logits
a =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
a =torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
a =model.to(__lowercase )
a =MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
a =prepare_img()
a =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
a =model(**__lowercase )
a =outputs.logits
# verify the logits
a =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowercase )
a =torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
a =model.to(__lowercase )
a =MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
a =prepare_img()
a =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
a =model(**__lowercase )
a =outputs.logits.detach().cpu()
a =image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] )
a =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowercase )
a =image_processor.post_process_semantic_segmentation(outputs=__lowercase )
a =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowercase )
| 81 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
__a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__a = PriorTransformer(**__lowercase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__a = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample",
            num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 302 | 0 |
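The test above hinges on deterministic, device-aware generator seeding. A minimal sketch of that pattern on its own; the make_generator helper is illustrative, not part of the sample:

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS does not support device-bound generators, so fall back to the global RNG there.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen_a = make_generator("cpu", seed=0)
gen_b = make_generator("cpu", seed=0)
# The same seed on the same device yields identical draws, which is what makes
# pixel-exact pipeline assertions possible.
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))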
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    't5-small': 512,
    't5-base': 512,
    't5-large': 512,
    't5-3b': 512,
    't5-11b': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token,
            pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 366 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"
    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 146 | 0 |
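A short usage sketch for the config above, assuming a recent transformers install; the printed defaults mirror the constructor arguments:

from transformers import SegformerConfig, SegformerForSemanticSegmentation

config = SegformerConfig()  # defaults match the signature above (MiT-b0 sized)
print(config.num_encoder_blocks, config.hidden_sizes)  # 4 [32, 64, 160, 256]

# Randomly initialized model built from the config; no pretrained weights involved.
model = SegformerForSemanticSegmentation(config)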
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 216 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n    predictions (list of str): The predicted sentences.\n    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n    char_order (int): Character n-gram order. Defaults to `6`.\n    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n    eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n    to reference chrF++.py, NLTK and Moses implementations. If `False`,\n    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n    \'score\' (float): The chrF (chrF++) score,\n    \'char_order\' (int): The character n-gram order,\n    \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n    \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n    Example 1--a simple example of calculating chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction, references=reference)\n        >>> print(results)\n        {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2)\n        >>> print(results)\n        {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2,\n        ...                         lowercase=True)\n        >>> print(results)\n        {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 335 | 0 |
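For orientation, the underlying sacrebleu scorer can also be called directly; a minimal sketch with illustrative sentences:

from sacrebleu import CHRF

hypotheses = ["The cat sits on the mat."]
references = [["The cat is sitting on the mat."]]  # one reference stream, one entry per hypothesis

chrf = CHRF(word_order=2)  # word_order=2 turns chrF into chrF++
print(chrf.corpus_score(hypotheses, references).score)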
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 359 |
'''simple docstring'''
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 187 | 0 |
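A brief usage sketch reusing the TrieNode class defined above; the word list is illustrative:

root = TrieNode()
root.insert_many(["car", "cart", "card"])

print(root.find("car"))   # True
print(root.find("ca"))    # False: a prefix, not an inserted word
root.delete("cart")
print(root.find("cart"))  # False after deletion
print(root.find("card"))  # True: sibling words are untouched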
"""simple docstring"""
def is_even(number: int) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1
def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
| 238 | 1 |
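Quick sanity checks for the solver above, using the small cases quoted in Project Euler 47 (values computed independently, not taken from the sample):

print(solution(2))  # 14  -> 14 = 2 * 7 and 15 = 3 * 5
print(solution(3))  # 644 -> 644, 645, 646 each have three distinct prime factors
# solution() with the default n=4 finds four consecutive integers with four
# distinct prime factors each; it may take a moment with trial division.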
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True,
            only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 10 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
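The sample above is the lazy-import pattern: heavy submodules are only imported when an attribute is first accessed. A minimal standalone sketch of the same idea using PEP 562's module-level __getattr__; mypkg, heavy_backend, and ExpensiveModel are hypothetical names:

# mypkg/__init__.py, with a submodule mypkg/heavy_backend.py defining ExpensiveModel
import importlib
from typing import Any

_import_structure = {"heavy_backend": ["ExpensiveModel"]}

def __getattr__(name: str) -> Any:
    # Resolve exported names lazily so `import mypkg` stays cheap.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")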
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 249 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory(args: Namespace):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config,
        tokenizer=args.tokenizer, device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device', type=int, default=-1,
            help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.')
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute('/', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['GET']),
                    APIRoute('/tokenize', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute('/detokenize', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute('/forward', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['POST']),
                ],
                timeout=600,
            )
def _lowerCamelCase ( self ) -> Union[str, Any]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def _lowerCamelCase ( self ) -> Tuple:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def _lowerCamelCase ( self , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) ) -> Optional[int]:
try:
__lowercase : Any = self._pipeline.tokenizer.tokenize(UpperCamelCase_ )
if return_ids:
__lowercase : Dict = self._pipeline.tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
return ServeTokenizeResult(tokens=UpperCamelCase_ , tokens_ids=UpperCamelCase_ )
else:
return ServeTokenizeResult(tokens=UpperCamelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(UpperCamelCase_ )} )
def _lowerCamelCase ( self , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , ) -> Dict:
try:
__lowercase : Tuple = self._pipeline.tokenizer.decode(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return ServeDeTokenizeResult(model='''''' , text=UpperCamelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(UpperCamelCase_ )} )
async def _lowerCamelCase ( self , UpperCamelCase_=Body(UpperCamelCase_ , embed=UpperCamelCase_ ) ) -> Union[str, Any]:
# Check we don't have empty string
if len(UpperCamelCase_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__lowercase : Optional[Any] = self._pipeline(UpperCamelCase_ )
return ServeForwardResult(output=UpperCamelCase_ )
except Exception as e:
raise HTTPException(5_00 , {'''error''': str(UpperCamelCase_ )} )
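# Illustrative only: a minimal sketch of how the serve command above might be exercised once it
# is registered on the transformers CLI parser. The task name and port are placeholder values
# taken from the argument defaults above, and the payload keys mirror the `/tokenize` route.
#
#   transformers-cli serve --task sentiment-analysis --host localhost --port 8888
#
# import requests  # assumed to be installed; not required by the command itself
#
# response = requests.post(
#     "http://localhost:8888/tokenize",
#     json={"text_input": "Hello world", "return_ids": True},
# )
# print(response.json())  # keys per ServeTokenizeResult: "tokens", "tokens_ids"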
| 249 | 1 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
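# Illustrative only: a minimal sketch of the public API re-exported by this module. The dataset
# name is an arbitrary example, not something this file depends on.
#
# from datasets import load_dataset
#
# ds = load_dataset("rotten_tomatoes", split="train")
# print(ds[0])  # a single example as a dict of column -> value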
| 361 |
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
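# Worked example (a sketch, not part of the original script): with key "ABC" the shift pattern
# cycles 0, 1, 2, so "HELLO" encrypts letter by letter as H+0=H, E+1=F, L+2=N, L+0=L, O+1=P.
#
# assert encrypt_message("ABC", "HELLO") == "HFNLP"
# assert decrypt_message("ABC", "HFNLP") == "HELLO"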
| 338 | 0 |
'''simple docstring'''
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
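# Worked example (a sketch): kilometres and metres sit at exponents 3 and 0 in METRIC_CONVERSION,
# so the conversion factor is 10 ** (3 - 0) and 1.5 km becomes 1.5 * 10**3 = 1500.0 m.
#
# assert length_conversion(1.5, "kilometer", "meter") == 1500.0
# assert length_conversion(1500.0, "meter", "kilometer") == 1.5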
| 83 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
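# Illustrative only: a typical invocation of this script. The flag names come from the
# dataclasses above plus the standard `TrainingArguments`; the checkpoint name and output
# path are placeholder values.
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --output_dir /tmp/swag_output \
#     --overwrite_output_dir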
| 83 | 1 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 361 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
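# Illustrative only: thanks to the `_LazyModule` indirection above, the tokenizer module is
# only imported on first attribute access. The checkpoint name below is a placeholder.
#
# from transformers import GPTSw3Tokenizer
#
# tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")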
| 87 | 0 |
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging (this is why `os` is imported)
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 244 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
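# Illustrative only: a minimal sketch of the extractor exercised by `test_integration` above.
# One second of 16 kHz audio yields a (1, 1024, 128) spectrogram, matching the asserted shape;
# the zero waveform is a placeholder input.
#
# import numpy as np
# from transformers import ASTFeatureExtractor
#
# feature_extractor = ASTFeatureExtractor()
# waveform = np.zeros(16000, dtype=np.float32)
# inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
# print(inputs.input_values.shape)  # torch.Size([1, 1024, 128])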
| 266 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 201 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_proc(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
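# Illustrative only: a sketch of the processor round trip covered by the tests above, following
# the documented MGP-STR usage; the checkpoint name and the `image` variable are assumptions.
#
# from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# outputs = model(pixel_values)
# generated_text = processor.batch_decode(outputs.logits)["generated_text"]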
| 201 | 1 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    main()
if __name__ == "__main__":
main()
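# Illustrative only: the pattern these tests exercise, reduced to its core. Under
# `gradient_accumulation_steps=2`, `accelerator.accumulate(model)` skips gradient
# synchronization on every other step; `compute_loss` is a placeholder helper.
#
# accelerator = Accelerator(gradient_accumulation_steps=2)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# for batch in dataloader:
#     with accelerator.accumulate(model):
#         loss = compute_loss(model, batch)
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()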
| 311 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
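# Worked example (a sketch): "SquadV2" snake-cases to "squad_v2", and a two-shard train split
# expands to zero-padded shard file names.
#
# camelcase_to_snakecase("SquadV2")              # -> "squad_v2"
# filename_prefix_for_split("SquadV2", "train")  # -> "squad_v2-train"
# filenames_for_dataset_split(
#     "/data", "SquadV2", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
# )
# # -> ["/data/squad_v2-train-00000-of-00002.arrow", "/data/squad_v2-train-00001-of-00002.arrow"]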
| 311 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a ={"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 366 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 113 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 331 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : int , _A : Union[str, Any] ) -> Any:
__magic_name__ : Dict = TFConvBertModel(config=_A )
__magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__magic_name__ : Any = [input_ids, input_mask]
__magic_name__ : Tuple = model(_A )
__magic_name__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]:
__magic_name__ : Dict = TFConvBertForMaskedLM(config=_A )
__magic_name__ : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple:
__magic_name__ : Any = self.num_labels
__magic_name__ : str = TFConvBertForSequenceClassification(config=_A )
__magic_name__ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.num_choices
__magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
__magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
__magic_name__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int:
__magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A )
__magic_name__ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : int ) -> Any:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
__magic_name__ : Any = True
if hasattr(_A , 'use_cache' ):
__magic_name__ : List[Any] = True
__magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A )
for model_class in self.all_model_classes:
__magic_name__ : List[str] = self._prepare_for_class(_A , _A )
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Tuple = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
__magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' )
__magic_name__ : Optional[int] = tf.keras.models.load_model(_A )
__magic_name__ : Optional[Any] = model(_A )
if self.is_encoder_decoder:
__magic_name__ : Optional[int] = outputs['encoder_hidden_states']
__magic_name__ : Tuple = outputs['encoder_attentions']
else:
__magic_name__ : Union[str, Any] = outputs['hidden_states']
__magic_name__ : Optional[Any] = outputs['attentions']
self.assertEqual(len(_A ) , _A )
__magic_name__ : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_A )
def __lowerCAmelCase ( self : List[str] ) -> Any:
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : str = True
__magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A )
__magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A )
def check_decoder_attentions_output(_A : List[Any] ):
__magic_name__ : Tuple = len(_A )
self.assertEqual(out_len % 2 , 0 )
__magic_name__ : Any = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_A : int ):
__magic_name__ : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = False
__magic_name__ : List[str] = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
__magic_name__ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
__magic_name__ : Any = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[int] = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : str = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : str = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
self.assertEqual(model.config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 331 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
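if __name__ == "__main__":
    # Quick sanity check (sketch; not part of the original module): build a smaller
    # configuration and serialize it.
    config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4)
    print(config.to_json_string())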
| 368 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # Swap the two elements if they are out of order for the given direction
    # (1 = ascending, 0 = descending).
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    # Recursively sort a bitonic sequence in the given direction.
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    # Build a bitonic sequence by sorting the first half ascending and the second
    # half descending, then merge the halves.
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
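# Note (added sketch): bitonic sort assumes len(array) is a power of two and performs
# O(n log^2 n) comparisons. For example:
#   data = [12, 42, -21, 17]
#   bitonic_sort(data, 0, len(data), 1)   # data becomes [-21, 12, 17, 42]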
| 109 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
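if __name__ == "__main__":
    # Sanity check (sketch; not part of the original module): attribute_map lets
    # generic code read `hidden_size` even though the field is stored as `n_embd`.
    config = ImageGPTConfig()
    assert config.hidden_size == config.n_embd == 512
    assert config.num_hidden_layers == config.n_layer == 24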
| 224 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
_snake_case = parser.parse_args()
_snake_case = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
_snake_case = CLIPImageProcessor()
_snake_case = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
_snake_case = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
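# Example invocation (hypothetical script name and output path):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variations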
| 157 | 0 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256 pixels wide
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
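if __name__ == "__main__":
    # Smoke test (sketch; not part of the original module). Requires the
    # `invisible-watermark` package that provides WatermarkEncoder.
    watermarker = StableDiffusionXLWatermarker()
    images = torch.rand(1, 3, 512, 512) * 2 - 1  # NCHW batch in [-1, 1]
    assert watermarker.apply_watermark(images).shape == images.shape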
| 291 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'''{solution() = }''')
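# Reference values (sketch): solution(10) == 17 (2 + 3 + 5 + 7), and the default
# solution() for primes below two million is 142913828922 (Project Euler #10).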
| 291 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batched inputs: dicts, lists of dicts, generators and datasets.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 296 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 296 | 1 |
def upper(word: str) -> str:
    # Shift lowercase ASCII letters to uppercase by subtracting 32 from the code point.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
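# Expected behavior (sketch):
#   upper("wow") == "WOW"
#   upper("Hello_World") == "HELLO_WORLD"  # non-lowercase characters pass through unchanged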
| 47 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for attribute in key.split("." ):
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
lowerCamelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = []
lowerCamelCase_ = fairseq_model.state_dict()
lowerCamelCase_ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(lowerCamelCase__ )[0].split("." )[-2]
lowerCamelCase_ = mapped_key.replace("*" , lowerCamelCase__ )
if "weight_g" in name:
lowerCamelCase_ = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ = "weight_v"
elif "bias" in name:
lowerCamelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ = "weight"
else:
lowerCamelCase_ = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = full_name.split("conv_layers." )[-1]
lowerCamelCase_ = name.split("." )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
if config_path is not None:
lowerCamelCase_ = UniSpeechSatConfig.from_pretrained(lowerCamelCase__ )
else:
lowerCamelCase_ = UniSpeechSatConfig()
lowerCamelCase_ = ""
if is_finetuned:
lowerCamelCase_ = UniSpeechSatForCTC(lowerCamelCase__ )
else:
lowerCamelCase_ = UniSpeechSatForPreTraining(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
lowerCamelCase_ = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
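# Example invocation (hypothetical script name and paths):
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf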
| 47 | 1 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
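# A few expected outcomes (sketch):
#   is_isogram("Uncopyrightable")  -> True
#   is_isogram("allowance")        -> False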
| 303 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
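# Usage sketch (model name illustrative; not part of the original module):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tokenizer)
#   batch = collator([tokenizer("short"), tokenizer("a somewhat longer sentence")])  # dynamically padded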
| 303 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    # Bead (gravity) sort: only defined for sequences of non-negative integers.
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 107 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_ignore_keys_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and initialize the HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase__ : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 98 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(__lowercase )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer(self ):
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
    def get_tokenizer(self , **A ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
    def get_input_output_texts(self , A ):
        return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def UpperCAmelCase__ (self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(vocab_keys ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids , py_ids )
def UpperCAmelCase__ (self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
def UpperCAmelCase__ (self ):
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
        src_texts = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 1_0_2_4)
        assert batch.attention_mask.shape == (2, 1_0_2_4)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(__lowercase , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer(self ):
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
    def get_tokenizer(self , **A ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
    def get_input_output_texts(self , A ):
        return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids , py_ids )
@require_torch
def UpperCAmelCase__ (self ):
        src_texts = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4_0_9_6)
        assert batch.attention_mask.shape == (2, 4_0_9_6)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
        src_text = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        token_ids = self._large_tokenizer(src_text ).input_ids
        self.assertListEqual(
            token_ids , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
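# Each test below simulates one offline failure mode: a connection that hangs, one that fails immediately, and the HF_DATASETS_OFFLINE=1 flag.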
@pytest.mark.integration
def test_offline_with_timeout()-> Union[str, Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error()-> Tuple:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def test_offline_with_datasets_offline_mode_enabled()-> str:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 355 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-1'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-2'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-3'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-4'
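# the four CompVis v1.x checkpoints that this comparison pipeline runs side by side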
class a_ ( DiffusionPipeline ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ) -> int:
"""simple docstring"""
        super().__init__()
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline(
vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=_SCREAMING_SNAKE_CASE , )
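        # the fourth pipeline is assembled from the individually supplied components instead of from_pretrained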
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A__ ( self ) -> Dict[str, Any]:
"""simple docstring"""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
    def A__ ( self , slice_size = "auto" ) -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
        self.enable_attention_slicing(None )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 183 | 0 |
"""simple docstring"""
import numpy as np
def sigmoid( vector: np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))
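# x * sigmoid(1.702 * x) is the sigmoid-based approximation of GELU (a scaled SiLU)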
def sigmoid_linear_unit( vector: np.array ) -> np.array:
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
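# hf_hub_url should percent-encode special characters in the path and fall back to the "main" revision when none is given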
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url( repo_id , path , revision ) -> List[str]:
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
| 217 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class snake_case__ ( Pipeline ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ):
super().__init__(*lowerCamelCase , **lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
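    # postprocessing maps the raw logits to the top-k (score, label) pairs, with one branch per backend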
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 268 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
SCREAMING_SNAKE_CASE__:Any = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
SCREAMING_SNAKE_CASE__:Optional[int] = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
SCREAMING_SNAKE_CASE__:Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count( a ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
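# sort-key helper: pick the first element (the frequency) out of a (frequency, letters) pair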
def get_item_at_index_zero( a ):
    return a[0]
def get_frequency_order( a ):
    letter_to_freq = get_letter_count(a )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
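# score a text by how many of its six most and six least frequent letters agree with English's ETAOIN ordering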
def _lowerCamelCase( a ):
    freq_order = get_frequency_order(a )
    match_score = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
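# _import_structure maps each submodule to the public names it exports; the lazy module at the bottom uses it to defer imports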
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148 |
"""simple docstring"""
import sys
from collections import defaultdict
class lowerCamelCase__ :
    def __init__(self ):
        """simple docstring"""
        self.node_position = []
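        # node_position[v] records where vertex v currently sits inside the heap arrays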
    def get_position(self , vertex ):
        """simple docstring"""
        return self.node_position[vertex]
    def set_position(self , vertex , pos ):
        """simple docstring"""
        self.node_position[vertex] = pos
    def top_to_bottom(self , heap , start , size , positions ):
        """simple docstring"""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top(self , val , index , heap , position ):
        """simple docstring"""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify(self , heap , positions ):
        """simple docstring"""
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum(self , heap , positions ):
        """simple docstring"""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
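# Example session (a weighted triangle graph; edges entered as "u v weight"):
#   Enter number of edges: 3
#   0 1 1
#   1 2 2
#   0 2 3
#   -> [(0, 1), (1, 2)]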
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 148 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( BaseOutput ):
"""simple docstring"""
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 171 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class SCREAMING_SNAKE_CASE__ ( TaskTemplate ):
"""simple docstring"""
    task: str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
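    # align_with_features below swaps the generic ClassLabel placeholder for the dataset's concrete label feature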
    def align_with_features( self , features ) -> List[str]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def __lowerCamelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 171 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> str:
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ (ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ) -> List[Any]:
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ) -> Tuple:
        return self.image_proc_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Optional[int]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
def _lowercase ( self ) -> List[Any]:
pass
def _lowercase ( self ) -> List[str]:
# Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _lowercase ( self ) -> str:
# Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _lowercase ( self ) -> int:
# Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 48 |
def harmonic_series( n_term ) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term ) ):
        series.append(f'''1/{temp + 1}''' if series else "1" )
    return series
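# e.g. harmonic_series(4) -> ['1', '1/2', '1/3', '1/4']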
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 48 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self ) -> Optional[int]:
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "tf_padding" ) )
        self.parent.assertTrue(hasattr(config , "depth_multiplier" ) )
class MobileNetVaModelTester :
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , min_depth=8 , tf_padding=True , last_hidden_size=1024 , output_stride=32 , hidden_act="relu6" , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) -> List[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self ) -> List[Any]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self ) -> List[Any]:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels , pixel_labels ) -> Union[str, Any]:
        '''simple docstring'''
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
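        # the spatial dimensions of the hidden state shrink by the model's output stride relative to the input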
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification(self , config , pixel_values , labels , pixel_labels ) -> Optional[int]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ) -> Optional[Any]:
        '''simple docstring'''
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __lowerCAmelCase ( ) -> List[str]:
"""simple docstring"""
snake_case : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self ) -> int:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self ) -> int:
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
    def get_input_output_texts(self , tokenizer ) -> List[Any]:
        '''simple docstring'''
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(" " , "" ) , output_text )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
| 112 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
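# Retrieves class ("regularization") images from LAION via knn search, e.g. for prior-preservation fine-tuning.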
def retrieve( class_prompt , class_data_dir , num_class_images )->Dict:
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f"{class_data_dir}/images" , exist_ok=True )
    if len(list(Path(f"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images' , total=num_class_images )
    with open(f"{class_data_dir}/caption.txt" , 'w' ) as fa, open(f"{class_data_dir}/urls.txt" , 'w' ) as fb, open(
        f"{class_data_dir}/images.txt" , 'w' ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'] )
                if img.status_code == 2_00:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f"{class_data_dir}/images/{total}.jpg" , 'wb' ) as f:
                        f.write(img.content )
                    fa.write(images['caption'] + '\n' )
                    fb.write(images['url'] + '\n' )
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + '\n' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args()->List[str]:
    parser = argparse.ArgumentParser('' , add_help=False )
    parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=True , type=str )
    parser.add_argument('--class_data_dir' , help='path to save images' , required=True , type=str )
    parser.add_argument('--num_class_images' , help='number of images to download' , default=2_00 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 193 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
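# helper: turn a cumulative-alpha ("alpha bar") function into a discrete beta schedule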
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , )->List[str]:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn( t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn( t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class SCREAMING_SNAKE_CASE__ ( SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.00085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , use_karras_sigmas = False , clip_sample = False , clip_sample_range = 1.0 , timestep_spacing = "linspace" , steps_offset = 0 , ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='cosine' )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='exp' )
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma(self):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device = None, num_train_timesteps = None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
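    # Hedged aside (names introduced here, not from this class): the same idea
    # as _sigma_to_t expressed with searchsorted -- invert a *decreasing* sigma
    # table by interpolating in log-space, returning a fractional index.
    @staticmethod
    def _demo_sigma_to_t(sigma, sigmas_table):
        log_sigmas = np.log(np.asarray(sigmas_table, dtype=np.float64))
        log_sigma = math.log(sigma)
        # bracket log_sigma; the table is decreasing, so -log_sigmas is increasing
        low_idx = int(np.clip(np.searchsorted(-log_sigmas, -log_sigma) - 1, 0, len(log_sigmas) - 2))
        high_idx = low_idx + 1
        low, high = log_sigmas[low_idx], log_sigmas[high_idx]
        w = float(np.clip((low - log_sigma) / (low - high), 0, 1))
        return (1 - w) * low_idx + w * high_idx
        # e.g. _demo_sigma_to_t(5.0, [10.0, 5.0, 1.0]) == 1.0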
    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
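    # Hedged standalone sketch of the Karras et al. (2022) sigma spacing used
    # above; `_demo_karras_sigmas` is an illustrative name, not part of diffusers.
    @staticmethod
    def _demo_karras_sigmas(sigma_min, sigma_max, n, rho=7.0):
        ramp = np.linspace(0, 1, n)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        # e.g. _demo_karras_sigmas(0.002, 80.0, 10) runs from 80.0 down to 0.002,
        # spacing the steps densely near zero noise.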
@property
    def state_in_first_order(self):
return self.dt is None
    def step(self, model_output, timestep, sample, return_dict = True):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`")
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
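    # Hedged aside: the two-phase logic above is Heun's method. A generic
    # sketch for dx/dt = f(t, x); illustrative only -- the scheduler spreads
    # these two phases across successive step() calls via
    # prev_derivative / dt / sample.
    @staticmethod
    def _demo_heun_step(f, t, x, dt):
        d1 = f(t, x)                       # Euler slope at the current point
        x_euler = x + d1 * dt              # Euler predictor
        d2 = f(t + dt, x_euler)            # slope at the predicted point
        return x + 0.5 * (d1 + d2) * dt    # trapezoidal corrector, 2nd-order accurate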
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
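    # Hedged sketch of the broadcasting used by add_noise: a per-sample sigma
    # of shape (B,) is unsqueezed until it broadcasts over (B, C, H, W).
    # `_demo_broadcast_sigma` is an illustrative name introduced here.
    @staticmethod
    def _demo_broadcast_sigma(x, sigma):
        while sigma.dim() < x.dim():
            sigma = sigma.unsqueeze(-1)
        return x + torch.randn_like(x) * sigma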
def __len__( self ):
return self.config.num_train_timesteps
| 193 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
def __len__( self ):
return self.config.num_train_timesteps
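# Hedged aside: step_pred above is one Euler-Maruyama step of the reverse-time
# VP-SDE. A generic sketch of the same update (illustrative names only):
def _demo_euler_maruyama(x, drift, diffusion, dt, generator=None):
    x_mean = x + drift * dt  # deterministic part: x + f(x, t) dt
    noise = torch.randn(x.shape, generator=generator, dtype=x.dtype)
    x_next = x_mean + diffusion * math.sqrt(abs(dt)) * noise  # stochastic part: g(t) sqrt(|dt|) z
    return x_next, x_mean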
| 41 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
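# Hedged standalone sketch of the metric above without the datasets wrapper
# (same semantics: normalize, then take the mean of elementwise equality).
# `_demo_exact_match` is an illustrative name introduced here.
def _demo_exact_match(preds, refs, ignore_case=True):
    p, r = np.asarray(preds), np.asarray(refs)
    if ignore_case:
        p, r = np.char.lower(p), np.char.lower(r)
    return float(np.mean(p == r) * 100)

# e.g. _demo_exact_match(["Cat", "dog"], ["cat", "bird"]) == 50.0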
| 41 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
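# Hedged minimal sketch of the prefix-rewrite pattern used by get_new_dict:
# apply ordered (old, new) substring pairs to every key of a state dict.
# `_demo_rename_keys` is an illustrative name introduced here.
def _demo_rename_keys(state_dict, pairs):
    out = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in pairs:
            new_key = new_key.replace(old, new)
        out[new_key] = value
    return out

# _demo_rename_keys({"bert.bert.x": 1}, [("bert.bert", "visual_bert")])
# -> OrderedDict([("visual_bert.x", 1)])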
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 104 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch) -> Dict[str, List[str]]:
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids) -> List[str]:
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        return lmap(str.strip, gen_text)
    def _step(self, batch) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
        return (loss,)
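    # Hedged standalone sketch in the style of the label_smoothed_nll_loss
    # helper imported above (placed here for reference next to _step; names are
    # illustrative, not the project's exact implementation). `ignore_index`
    # must be a valid vocab id, e.g. the pad token id, as it is in _step.
    @staticmethod
    def _demo_label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index):
        # lprobs: (N, V) log-probabilities; target: (N,) class indices
        nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1))
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
        pad_mask = target.unsqueeze(-1).eq(ignore_index)
        nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
        smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
        eps_i = epsilon / lprobs.size(-1)
        # blend the hard NLL with the uniform "smoothing" term
        return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss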
@property
    def pad(self) -> int:
return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"], attention_mask=batch["attention_mask"], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length)
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def test_epoch_end(self, outputs) -> Dict:
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
        return dataset
    def get_dataloader(self, type_path, batch_size, shuffle = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length", default=56, type=int,
            help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int,
            help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int,
            help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="task name, e.g. summarization or translation")
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None])
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
| 299 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others.")
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
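# Hedged aside (illustrative helper, kept separate from the doc-toc specifics):
# the same dedup-by-key pattern on plain data -- keep unique entries, keep one
# representative per duplicated key, then sort.
def _demo_dedup_by_key(items, key):
    counts = defaultdict(int)
    for item in items:
        counts[item[key]] += 1
    kept = [item for item in items if counts[item[key]] == 1]
    seen = set()
    for item in items:
        if counts[item[key]] > 1 and item[key] not in seen:
            kept.append(item)
            seen.add(item[key])
    return sorted(kept, key=lambda item: item[key])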
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 136 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 2_01,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_06, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 136 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=2_0, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=2_0, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_dpmpp_2m')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=1_5, output_type='np', use_karras_sigmas=True)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
            [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 4 |
def simplify(current_set):
    # Divide each row by the magnitude of its first term -> creates a leading 1
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations):
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
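# Hedged cross-check (not in the original): the same augmented-matrix systems
# can be solved with numpy.linalg.solve, which is handy for validating this
# pure-Python elimination. `_demo_numpy_check` is an illustrative name.
def _demo_numpy_check(equations):
    import numpy as np

    a = np.array([row[:-1] for row in equations], dtype=float)  # coefficient matrix
    b = np.array([row[-1] for row in equations], dtype=float)   # constants column
    return [round(float(x), 5) for x in np.linalg.solve(a, b)]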
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 186 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
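# Hedged aside (illustrative sketch, not transformers' implementation): the
# same lazy-import idea can be expressed with PEP 562 module-level __getattr__.
# Shown commented out so it does not interfere with the _LazyModule above;
# names here are hypothetical.
#
# _LAZY = {"LxmertConfig": ".configuration_lxmert"}
#
# def __getattr__(name):
#     import importlib
#     if name in _LAZY:
#         return getattr(importlib.import_module(_LAZY[name], __package__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")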
| 362 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
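# Hedged minimal sketch of the fused-QKV split performed above: one (3*C, ...)
# tensor becomes separate query/key/value tensors. `_demo_split_qkv` is a
# hypothetical helper name introduced here.
def _demo_split_qkv(qkv_weight):
    three_c = qkv_weight.shape[0]
    assert three_c % 3 == 0, "fused QKV dim must be divisible by 3"
    return torch.split(qkv_weight, three_c // 3, dim=0)  # -> (q, k, v)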
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)
        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                'old': f'input_blocks.{i}.1',
                'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                f'input_blocks.{i}.1.qkv.bias': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                f'input_blocks.{i}.1.qkv.weight': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config)
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )

    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
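# Example invocation of this conversion script (script name and paths are illustrative,
# not taken from the source above):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model
#
# If a scheduler config and VQ-VAE weights are found next to the checkpoint, a full
# LDMPipeline is saved; otherwise only the converted UNet weights land in --dump_path.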
| 312 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """
    A "fast" GPT-2 tokenizer (backed by HuggingFace's `tokenizers` library), using byte-level BPE.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
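# Minimal usage sketch (model id illustrative; exact ids depend on the vocab):
#
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   tokenizer("Hello world")["input_ids"]
#
# Pre-tokenized input requires the prefix-space variant, which is what the asserts
# in _encode_plus/_batch_encode_plus above enforce:
#
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tokenizer(["Hello", "world"], is_split_into_words=True)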
| 64 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    """A toy self-organising map with two competing weight vectors."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # The *smaller* distance wins.
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning vector `j` towards the sample by the learning rate `alpha`."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # Weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
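# With the toy data above, the two weight vectors tend to specialise: samples that start
# with a 1 ([1, 1, 0, 0], [1, 0, 0, 0]) typically capture one vector, and samples ending
# in a 1 ([0, 0, 0, 1], [0, 0, 1, 1]) the other, so the test sample [0, 0, 0, 1] is
# expected to be assigned to the latter cluster (a property of this data, not a guarantee).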
| 63 | 0 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k fillings of the single <mask> token in `masked_input`."""
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
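# Illustrative consumption of the result (actual tokens depend on the model weights):
# each element of the returned list is a (filled_sentence, probability, predicted_token)
# triple, e.g.
#
#   for filled, score, token in fill_mask(masked_input, model, tokenizer, topk=3):
#       print(f"{score:.3f}  {filled}")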
| 104 |
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Greedy optimal merge pattern: repeatedly merge the two smallest files.

    Returns the minimum total cost of merging all files into one.
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum size to be merged next.
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
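# Worked example: for files = [2, 3, 4] the greedy strategy first merges 2 and 3
# (cost 5), then merges 5 and 4 (cost 9), so optimal_merge_pattern([2, 3, 4]) == 14.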
| 104 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# Equatorial (semi-major) and polar (semi-minor) radii of the WGS-84 ellipsoid, in meters.
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Approximate the geographical distance (in meters) between two points on the
    surface of the Earth, modelled as an oblate spheroid, using Lambert's formula.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
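# Illustrative call (coordinates in decimal degrees, result in meters). For
# San Francisco (37.774856, -122.424227) and Yosemite (37.864742, -119.537521)
# the result is roughly 254 km -- close to, but slightly different from, the
# spherical haversine distance, since Lambert's formula accounts for flattening:
#
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)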
| 339 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['''rouge2''', '''rougeL'''])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['''rouge2'''])
    assert (
        pd.DataFrame(no_aggregation['''rouge2''']).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2['''rouge2''']).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = '''rougeLsum'''
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ['''rouge1''', '''rouge2''', '''rougeL''']
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
    ]
    tgt = [
        '''Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
        ''' the final seconds on board Flight 9525.''',
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    tgt = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=['''rougeLsum'''], newline_sep=False)['''rougeLsum''']
    new_score = calculate_rouge(pred, tgt, rouge_keys=['''rougeLsum'''])['''rougeLsum''']
    assert new_score > prev_score


def test_calculate_rouge_path():
    data_dir = Path('''examples/seq2seq/test_data/wmt_en_ro''')
    metrics = calculate_rouge_path(data_dir.joinpath('''test.source'''), data_dir.joinpath('''test.target'''))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath('''test.source'''), data_dir.joinpath('''test.target'''), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
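# Note on the behavior exercised above: "rougeLsum" splits summaries on newlines, so
# sentence-per-line formatting (newline_sep=True) changes -- and for CNN-style data
# typically improves -- its value, while rouge1/rouge2/rougeL are insensitive to it.
# A typical invocation (path illustrative):
#   pytest test_calculate_rouge.py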
| 320 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor into a single processor.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
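# Minimal usage sketch (model id illustrative; ViLT expects a paired image + text input):
#
#   from PIL import Image
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   image = Image.open("cats.png")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values / pixel_mask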
| 365 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        # NOTE: the config attribute names below are reconstructed from the public
        # Mask2Former test suite; the assigned values match the original lines.
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
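# The integration tests above download "facebook/mask2former-swin-small-coco-instance"
# and are gated behind the slow-test flag, so a typical invocation is (path illustrative):
#   RUN_SLOW=1 pytest tests/models/mask2former/test_modeling_mask2former.py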
| 241 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" BERT tokenizer (backed by HuggingFace's `tokenizers` library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
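# Minimal usage sketch (the 101/102 ids are the well-known [CLS]/[SEP] of
# bert-base-uncased; the middle ids depend on the vocabulary):
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tokenizer("hello world")["input_ids"]            # -> [101, ..., 102]
#   tokenizer("hello", "world")["token_type_ids"]    # 0s for sentence A, 1s for sentence B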
| 137 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f'part_id = {partition_id}').drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class _snake_case ( datasets.DatasetBuilder ):
_lowercase : int = SparkConfig
def __init__( self , a , a = None , a = None , **a , ) -> List[str]:
import pyspark
SCREAMING_SNAKE_CASE = pyspark.sql.SparkSession.builder.getOrCreate()
SCREAMING_SNAKE_CASE = df
SCREAMING_SNAKE_CASE = working_dir
super().__init__(
cache_dir=a , config_name=str(self.df.semanticHash()) , **a , )
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
# Returns the path of the created file.
def create_cache_and_write_probe(a):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a)
SCREAMING_SNAKE_CASE = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a , 'a')
return [probe_file]
if self._spark.conf.get('spark.master' , '').startswith('local'):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
SCREAMING_SNAKE_CASE = (
self._spark.sparkContext.parallelize(range(1) , 1).mapPartitions(a).collect()
)
if os.path.isfile(probe[0]):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]:
import pyspark
def get_arrow_batch_size(a):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]})
SCREAMING_SNAKE_CASE = self.df.count()
SCREAMING_SNAKE_CASE = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
SCREAMING_SNAKE_CASE = (
self.df.limit(a)
.repartition(1)
.mapInArrow(a , 'batch_bytes: long')
.agg(pyspark.sql.functions.sum('batch_bytes').alias('sample_bytes'))
.collect()[0]
.sample_bytes
/ sample_num_rows
)
SCREAMING_SNAKE_CASE = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
SCREAMING_SNAKE_CASE = min(a , int(approx_total_size / max_shard_size))
SCREAMING_SNAKE_CASE = self.df.repartition(a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
SCREAMING_SNAKE_CASE = ParquetWriter if file_format == 'parquet' else ArrowWriter
SCREAMING_SNAKE_CASE = os.path.join(self._working_dir , os.path.basename(a)) if self._working_dir else fpath
SCREAMING_SNAKE_CASE = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
SCREAMING_SNAKE_CASE = self.config.features
SCREAMING_SNAKE_CASE = self._writer_batch_size
SCREAMING_SNAKE_CASE = self._fs.storage_options
def write_arrow(a):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
SCREAMING_SNAKE_CASE = pyspark.TaskContext().taskAttemptId()
SCREAMING_SNAKE_CASE = next(a , a)
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = writer_class(
features=a , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , writer_batch_size=a , storage_options=a , embed_local_files=a , )
SCREAMING_SNAKE_CASE = pa.Table.from_batches([first_batch])
writer.write_table(a)
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
SCREAMING_SNAKE_CASE = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , writer_batch_size=a , storage_options=a , embed_local_files=a , )
SCREAMING_SNAKE_CASE = pa.Table.from_batches([batch])
writer.write_table(a)
if writer._num_bytes > 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a)):
SCREAMING_SNAKE_CASE = os.path.join(os.path.dirname(a) , os.path.basename(a))
shutil.move(a , a)
SCREAMING_SNAKE_CASE = (
self.df.mapInArrow(a , 'task_id: long, num_examples: long, num_bytes: long')
.groupBy('task_id')
.agg(
pyspark.sql.functions.sum('num_examples').alias('total_num_examples') , pyspark.sql.functions.sum('num_bytes').alias('total_num_bytes') , pyspark.sql.functions.count('num_bytes').alias('num_shards') , pyspark.sql.functions.collect_list('num_examples').alias('shard_lengths') , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE__ ( self , a , a = "arrow" , a = None , a = None , **a , ) -> List[Any]:
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
self._repartition_df_if_needed(max_shard_size)
is_local = not is_remote_filesystem(self._fs)
path_join = os.path.join if is_local else posixpath.join
SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
fpath = path_join(self._output_dir , fname)
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size):
(
    num_examples,
    num_bytes,
    num_shards,
    shard_lengths,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards))
all_shard_lengths.extend(shard_lengths)
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''')
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
    task_id , shard_id , global_shard_id , ):
    rename(
        fs , fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''').replace('NNNNN' , f'''{total_shards:05d}''') , )
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards)):
    task_id , num_shards = task_id_and_num_shards[i]
    for shard_id in range(num_shards):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(args , len(args)).map(lambda args: _rename_shard(*args)).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
    fpath.replace('SSSSS' , f'''{shard_id:05d}''').replace('TTTTT' , f'''{task_id:05d}''') , fpath.replace(SUFFIX , '') , )
def _get_examples_iterable_for_split(self , split_generator , ) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df)
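# A standalone sketch of the "capture plain values, never self" pattern used in
# write_arrow above (assumptions: a local Spark install is available; all names
# below are illustrative and not part of the builder).
from pyspark.sql import SparkSession

def make_doubler(factor):
    # Only the plain int `factor` is captured, so the closure pickles cleanly;
    # capturing an object that holds the SparkContext would fail to pickle.
    def doubler(rows):
        for row in rows:
            yield row * factor
    return doubler

if __name__ == "__main__":
    spark = SparkSession.builder.master("local[1]").getOrCreate()
    rdd = spark.sparkContext.parallelize([1, 2, 3], numSlices=1)
    print(rdd.mapPartitions(make_doubler(2)).collect())  # [2, 4, 6]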
| 137 | 1 |
'''simple docstring'''
def solution(length = 50):
    """
    Count the ways to place red (length 2), green (length 3) or blue (length 4)
    tiles on a row of ``length`` cells, using at least one tile and a single
    colour per arrangement (cf. Project Euler problem 116).

    >>> solution(5)
    12
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
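    # different_colour_ways_number[n][t - 2] counts the arrangements on a row of
    # length n that use at least one tile of length t (t in {2, 3, 4}); the
    # "+ 1" in the update below accounts for the arrangement whose last tile is
    # the one placed at tile_start, with every cell after it left uncovered.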
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'{solution() = }')
| 52 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : List[str] ,lowercase__ : str ,lowercase__ : List[str]=1_3 ,lowercase__ : str=7 ,lowercase__ : Tuple=True ,lowercase__ : Dict=True ,lowercase__ : str=True ,lowercase__ : Optional[Any]=True ,lowercase__ : List[str]=9_9 ,lowercase__ : int=[1, 1, 2] ,lowercase__ : int=1 ,lowercase__ : Tuple=3_2 ,lowercase__ : Union[str, Any]=4 ,lowercase__ : Tuple=8 ,lowercase__ : Any=3_7 ,lowercase__ : Union[str, Any]="gelu_new" ,lowercase__ : Tuple=0.1 ,lowercase__ : int=0.1 ,lowercase__ : Optional[int]=0.0 ,lowercase__ : Union[str, Any]=5_1_2 ,lowercase__ : Dict=3 ,lowercase__ : Union[str, Any]=0.0_2 ,lowercase__ : Any=3 ,lowercase__ : Tuple=4 ,lowercase__ : Dict=None ,lowercase__ : List[Any]=False ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = block_sizes
__lowercase = num_decoder_layers
__lowercase = d_model
__lowercase = n_head
__lowercase = d_head
__lowercase = d_inner
__lowercase = hidden_act
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = 2
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowercase = n_head
# Used in the tests to check the size of the first hidden state
__lowercase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowercase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowercase = self.num_hidden_layers + 2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
input_mask = None
if self.use_input_mask:
    input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
    token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
    sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
    token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
    choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
config = FunnelConfig(
    vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : List[str] ,lowercase__ : str ,):
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
config.truncate_seq = False
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
config.separate_cls = False
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,):
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
config.truncate_seq = False
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
config.separate_cls = False
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
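    # Note on the expected middle dimensions (2 and 3) above: each Funnel block
    # after the first pools the sequence length by roughly a factor of 2, so
    # with block_sizes=[1, 1, 2] a length-7 input ends up at 2 tokens, or 3
    # when the truncate_seq / separate_cls behaviour toggled between these
    # checks changes how the [CLS] slot is handled. This reading is inferred
    # from the assertions, not from an official reference.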
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,):
__lowercase = TFFunnelForPreTraining(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,):
__lowercase = TFFunnelForMaskedLM(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : List[Any] ,lowercase__ : int ,lowercase__ : Tuple ,lowercase__ : List[str] ,):
__lowercase = self.num_labels
__lowercase = TFFunnelForSequenceClassification(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : Any ,lowercase__ : Tuple ,):
__lowercase = self.num_choices
__lowercase = TFFunnelForMultipleChoice(config=lowercase__ )
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
multiple_choice_input_mask = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : str ,lowercase__ : Any ,):
__lowercase = self.num_labels
__lowercase = TFFunnelForTokenClassification(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,):
__lowercase = TFFunnelForQuestionAnswering(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE : int = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : List[str] = False
def SCREAMING_SNAKE_CASE ( self : int ):
self.model_tester = TFFunnelModelTester(self )
self.config_tester = ConfigTester(self ,config_class=FunnelConfig )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Tuple = False
def SCREAMING_SNAKE_CASE ( self : Dict ):
self.model_tester = TFFunnelModelTester(self ,base=True )
self.config_tester = ConfigTester(self ,config_class=FunnelConfig )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
| 52 | 1 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    '''
    Sort a sequence containing only the values in ``colors`` (0, 1 and 2) in a
    single pass, using Dijkstra's three-way partitioning.

    >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2])
    [0, 0, 1, 1, 2, 2]
    >>> dutch_national_flag_sort([])
    []
    '''
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
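    # Invariant: sequence[:low] holds 0s, sequence[low:mid] holds 1s and
    # sequence[high + 1:] holds 2s; the loop shrinks the unclassified middle
    # region sequence[mid:high + 1] until it is empty.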
while mid <= high:
if sequence[mid] == colors[0]:
sequence[low], sequence[mid] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
sequence[mid], sequence[high] = sequence[high], sequence[mid]
high -= 1
else:
msg = f'The elements inside the sequence must contain only {colors} values'
raise ValueError(msg)
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input("""Enter numbers separated by commas:\n""").strip()
unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 68 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)
def rms_speed_of_molecule(temperature: float , molar_mass: float) -> float:
    '''
    Root-mean-square speed v_rms = sqrt(3 * R * T / M), with the temperature T
    in kelvin and the molar mass M in kg/mol.

    >>> round(rms_speed_of_molecule(300, 0.028), 1)
    517.0
    '''
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
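# Worked check of the formula (values rounded): for nitrogen at 300 K,
# v_rms = sqrt(3 * 8.3144598 * 300 / 0.028) ~= sqrt(267250.5) ~= 517 m/s.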
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
temperature = 300  # kelvin
molar_mass = 0.028  # molar mass of nitrogen (N2) in kg/mol, i.e. 28 g/mol
vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 172 | 0 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__lowercase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _lowercase ( datasets.BuilderConfig ):
"""simple docstring"""
lowercase__ = None
def lowerCAmelCase (__UpperCamelCase : "pyspark.sql.DataFrame" , __UpperCamelCase : List[int] , ):
"""simple docstring"""
import pyspark
def generate_fn():
df_with_partition_id = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
    partition_df = df_with_partition_id.select('''*''' ).where(f"""part_id = {partition_id}""" ).drop('''part_id''' )
    rows = partition_df.collect()
    row_id = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
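# The keys yielded above have the form "{partition_id}_{row_id}", so each
# example id is unique and stays stable for a fixed partition_order; shuffling
# only permutes the partition_order list that this closure iterates over.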
class _lowercase ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : Optional[int]=None , ) -> int:
'''simple docstring'''
__UpperCamelCase =df
__UpperCamelCase =partition_order or range(self.df.rdd.getNumPartitions() )
__UpperCamelCase =_generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Any ) -> Optional[int]:
'''simple docstring'''
yield from self.generate_examples_fn()
def shuffle_data_sources( self , generator: np.random.Generator ) -> "SparkExamplesIterable":
    '''simple docstring'''
    partition_order = list(range(self.df.rdd.getNumPartitions() ) )
    generator.shuffle(partition_order )
    return SparkExamplesIterable(self.df , partition_order=partition_order )
def shard_data_sources( self , worker_id: int , num_workers: int ) -> "SparkExamplesIterable":
    '''simple docstring'''
    partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
    return SparkExamplesIterable(self.df , partition_order=partition_order )
@property
def n_shards( self ) -> int:
    '''simple docstring'''
    return len(self.partition_order )
class _lowercase ( datasets.DatasetBuilder ):
"""simple docstring"""
lowercase__ = SparkConfig
def __init__( self : List[str] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : str = None , UpperCamelCase__ : str = None , **UpperCamelCase__ : List[str] , ) -> Any:
'''simple docstring'''
import pyspark
__UpperCamelCase =pyspark.sql.SparkSession.builder.getOrCreate()
__UpperCamelCase =df
__UpperCamelCase =working_dir
super().__init__(
cache_dir=UpperCamelCase__ , config_name=str(self.df.semanticHash() ) , **UpperCamelCase__ , )
def _validate_cache_dir( self ):
'''simple docstring'''
def create_cache_and_write_probe(UpperCamelCase__ : Any ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=True )
probe_file = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(probe_file , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
probe = (
    self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def _info( self ) -> datasets.DatasetInfo:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _split_generators( self , dl_manager: datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _repartition_df_if_needed( self , max_shard_size ):
    '''simple docstring'''
    import pyspark
    def get_arrow_batch_size(it ):
        for batch in it:
            yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
    df_num_rows = self.df.count()
    sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
    # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
    approx_bytes_per_row = (
        self.df.limit(sample_num_rows )
        .repartition(1 )
        .mapInArrow(get_arrow_batch_size , '''batch_bytes: long''' )
        .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
        .collect()[0]
        .sample_bytes
        / sample_num_rows
    )
    approx_total_size = approx_bytes_per_row * df_num_rows
    if approx_total_size > max_shard_size:
        # Make sure there is at least one row per partition.
        new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
        self.df = self.df.repartition(new_num_partitions )
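    # Worked example of the math above (illustrative numbers, not from the
    # source): at roughly 1 KB per row over 1_000_000 rows, approx_total_size
    # is about 1 GB; with max_shard_size = 100 MB that gives
    # min(1_000_000, 10) = 10 partitions, i.e. roughly one shard of about
    # max_shard_size per partition.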
def _prepare_split_single( self , fpath: str , file_format: str , max_shard_size: int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
    '''simple docstring'''
    import pyspark
    writer_class = ParquetWriter if file_format == '''parquet''' else ArrowWriter
    working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
    embed_local_files = file_format == '''parquet'''
    # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
    # pickling the SparkContext.
    features = self.config.features
    writer_batch_size = self._writer_batch_size
    storage_options = self._fs.storage_options
    def write_arrow(it ):
        # Within the same SparkContext, no two task attempts will share the same attempt ID.
        task_id = pyspark.TaskContext().taskAttemptId()
        first_batch = next(it , None )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id = 0
writer = writer_class(
    features=features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
table = pa.Table.from_batches([first_batch] )
writer.write_table(table )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
writer = writer_class(
    features=writer._features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
table = pa.Table.from_batches([batch] )
writer.write_table(table )
if writer._num_bytes > 0:
num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(working_fpath ) ):
    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
    shutil.move(file , dest )
stats = (
    self.df.mapInArrow(write_arrow , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : "datasets.SplitGenerator" , UpperCamelCase__ : str = "arrow" , UpperCamelCase__ : Optional[Union[str, int]] = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(max_shard_size )
is_local = not is_remote_filesystem(self._fs )
path_join = os.path.join if is_local else posixpath.join
SUFFIX = '''-TTTTT-SSSSS-of-NNNNN'''
fname = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
fpath = path_join(self._output_dir , fname )
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
(
    num_examples,
    num_bytes,
    num_shards,
    shard_lengths,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(shard_lengths )
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
    task_id: int , shard_id: int , global_shard_id: int , ):
    rename(
        fs , fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , f"""{global_shard_id:05d}""" ).replace('''NNNNN''' , f"""{total_shards:05d}""" ) , )
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards ) ):
    task_id , num_shards = task_id_and_num_shards[i]
    for shard_id in range(num_shards ):
        args.append([task_id, shard_id, global_shard_id] )
        global_shard_id += 1
self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
    fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace(SUFFIX , '''''' ) , )
def UpperCAmelCase_ ( self : Tuple , UpperCamelCase__ : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 85 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = 42
class _lowercase ( __a , __a ):
"""simple docstring"""
@register_to_config
def __init__( self : List[Any] , UpperCamelCase__ : int = 32 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 20 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : str=77 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ) -> Any:
'''simple docstring'''
super().__init__()
__UpperCamelCase =num_attention_heads
__UpperCamelCase =attention_head_dim
__UpperCamelCase =num_attention_heads * attention_head_dim
__UpperCamelCase =additional_embeddings
__UpperCamelCase =time_embed_dim or inner_dim
__UpperCamelCase =embedding_proj_dim or embedding_dim
__UpperCamelCase =clip_embed_dim or embedding_dim
__UpperCamelCase =Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0 )
__UpperCamelCase =TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__ )
__UpperCamelCase =nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
if embedding_proj_norm_type is None:
__UpperCamelCase =None
elif embedding_proj_norm_type == "layer":
__UpperCamelCase =nn.LayerNorm(UpperCamelCase__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__UpperCamelCase =nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
if encoder_hid_proj_type is None:
__UpperCamelCase =None
elif encoder_hid_proj_type == "linear":
__UpperCamelCase =nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__UpperCamelCase =nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__ ) )
if added_emb_type == "prd":
__UpperCamelCase =nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__ ) )
elif added_emb_type is None:
__UpperCamelCase =None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__UpperCamelCase =nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn='''gelu''' , attention_bias=UpperCamelCase__ , )
for d in range(UpperCamelCase__ )
] )
if norm_in_type == "layer":
__UpperCamelCase =nn.LayerNorm(UpperCamelCase__ )
elif norm_in_type is None:
__UpperCamelCase =None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__UpperCamelCase =nn.LayerNorm(UpperCamelCase__ )
__UpperCamelCase =nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
__UpperCamelCase =causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , UpperCamelCase__ , persistent=UpperCamelCase__ )
__UpperCamelCase =nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) )
__UpperCamelCase =nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase_ ( self : Any ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__UpperCamelCase ={}
def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase__ , '''set_processor''' ):
__UpperCamelCase =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase__ , UpperCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return processors
def UpperCAmelCase_ ( self : int , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =len(self.attn_processors.keys() )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : int ):
if hasattr(UpperCamelCase__ , '''set_processor''' ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
module.set_processor(UpperCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase__ , UpperCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =hidden_states.shape[0]
__UpperCamelCase =timestep
if not torch.is_tensor(UpperCamelCase__ ):
__UpperCamelCase =torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase__ ) and len(timesteps.shape ) == 0:
__UpperCamelCase =timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCamelCase =timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
__UpperCamelCase =self.time_proj(UpperCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCamelCase =timesteps_projected.to(dtype=self.dtype )
__UpperCamelCase =self.time_embedding(UpperCamelCase__ )
if self.embedding_proj_norm is not None:
__UpperCamelCase =self.embedding_proj_norm(UpperCamelCase__ )
__UpperCamelCase =self.embedding_proj(UpperCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCamelCase =self.encoder_hidden_states_proj(UpperCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
__UpperCamelCase =self.proj_in(UpperCamelCase__ )
__UpperCamelCase =self.positional_embedding.to(hidden_states.dtype )
__UpperCamelCase =[]
__UpperCamelCase =0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCamelCase =proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCamelCase =hidden_states[:, None, :]
__UpperCamelCase =additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCamelCase =self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase__ , -1 , -1 )
additional_embeds.append(UpperCamelCase__ )
__UpperCamelCase =torch.cat(
UpperCamelCase__ , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCamelCase =additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCamelCase =F.pad(
UpperCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCamelCase =hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCamelCase =(1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
__UpperCamelCase =F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
__UpperCamelCase =(attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCamelCase =attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCamelCase =self.norm_in(UpperCamelCase__ )
for block in self.transformer_blocks:
__UpperCamelCase =block(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
__UpperCamelCase =self.norm_out(UpperCamelCase__ )
if self.prd_embedding is not None:
__UpperCamelCase =hidden_states[:, -1]
else:
__UpperCamelCase =hidden_states[:, additional_embeddings_len:]
__UpperCamelCase =self.proj_to_clip_embeddings(UpperCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__ )
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : int ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =(prior_latents * self.clip_std) + self.clip_mean
return prior_latents
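# Hedged usage sketch: this obfuscated file mirrors diffusers' PriorTransformer,
# so the snippet below exercises the equivalent upstream class instead
# (assumptions: torch and diffusers are installed; all sizes are illustrative).
import torch
from diffusers import PriorTransformer

if __name__ == "__main__":
    prior = PriorTransformer(
        num_attention_heads=2,
        attention_head_dim=8,
        num_layers=2,
        embedding_dim=16,
        num_embeddings=4,
        additional_embeddings=4,
    )
    out = prior(
        hidden_states=torch.randn(1, 16),             # noisy image-embedding latents
        timestep=1,
        proj_embedding=torch.randn(1, 16),            # pooled text embedding
        encoder_hidden_states=torch.randn(1, 4, 16),  # per-token text states
    )
    print(out.predicted_image_embedding.shape)  # torch.Size([1, 16])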
| 85 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase : str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[List[ImageInput]]:
if isinstance(lowercase ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase ,(list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase ):
return [[videos]]
raise ValueError(f"""Could not make batched video from {videos}""" )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 2_5_5 , offset = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
    super().__init__(**kwargs )
    size = size if size is not None else {"""shortest_edge""": 2_5_6}
    size = get_size_dict(size , default_to_square=False )
    crop_size = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
    crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
    self.do_resize = do_resize
    self.size = size
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.resample = resample
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.offset = offset
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PILImageResampling.BILINEAR , A = None , **A , ) -> np.ndarray:
snake_case : Optional[Any] = get_size_dict(A , default_to_square=A )
if "shortest_edge" in size:
snake_case : Tuple = get_resize_output_image_size(A , size["""shortest_edge"""] , default_to_square=A )
elif "height" in size and "width" in size:
snake_case : Tuple = (size["""height"""], size["""width"""])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(A , size=A , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Optional[Any] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = True , A = None , **A , ) -> int:
snake_case : List[str] = image.astype(np.floataa )
if offset:
snake_case : Optional[Any] = image - (scale / 2)
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
snake_case : str = to_numpy_array(A )
if do_resize:
snake_case : Any = self.resize(image=A , size=A , resample=A )
if do_center_crop:
snake_case : List[Any] = self.center_crop(A , size=A )
if do_rescale:
snake_case : Any = self.rescale(image=A , scale=A , offset=A )
if do_normalize:
snake_case : Dict = self.normalize(image=A , mean=A , std=A )
snake_case : Optional[int] = to_channel_dimension_format(A , A )
return image
def UpperCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case : int = resample if resample is not None else self.resample
snake_case : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : int = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Tuple = offset if offset is not None else self.offset
snake_case : Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case : Dict = image_mean if image_mean is not None else self.image_mean
snake_case : str = image_std if image_std is not None else self.image_std
snake_case : int = size if size is not None else self.size
snake_case : Union[str, Any] = get_size_dict(A , default_to_square=A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : Union[str, Any] = get_size_dict(A , param_name="""crop_size""" )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
snake_case : Any = make_batched(A )
snake_case : Union[str, Any] = [
[
self._preprocess_image(
image=A , do_resize=A , size=A , resample=A , do_center_crop=A , crop_size=A , do_rescale=A , rescale_factor=A , offset=A , do_normalize=A , image_mean=A , image_std=A , data_format=A , )
for img in video
]
for video in videos
]
snake_case : List[str] = {"""pixel_values""": videos}
return BatchFeature(data=A , tensor_type=A )
| 124 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Dict:
snake_case : Any = parent
snake_case : List[Any] = batch_size
snake_case : List[Any] = seq_length
snake_case : Dict = is_training
snake_case : List[str] = use_input_mask
snake_case : List[str] = use_token_type_ids
snake_case : Dict = use_labels
snake_case : Optional[int] = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : Optional[Any] = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : Union[str, Any] = intermediate_size
snake_case : List[Any] = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : str = attention_probs_dropout_prob
snake_case : List[Any] = max_position_embeddings
snake_case : List[Any] = type_vocab_size
snake_case : int = type_sequence_label_size
snake_case : Optional[int] = initializer_range
snake_case : Union[str, Any] = num_labels
snake_case : List[str] = num_choices
snake_case : Optional[int] = scope
def UpperCAmelCase ( self ) -> int:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
    input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
    token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
    sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
    token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
    choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Any:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> List[str]:
snake_case : Tuple = NystromformerModel(config=A )
model.to(A )
model.eval()
snake_case : str = model(A , attention_mask=A , token_type_ids=A )
snake_case : List[str] = model(A , token_type_ids=A )
snake_case : Any = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Union[str, Any]:
snake_case : List[Any] = NystromformerForMaskedLM(config=A )
model.to(A )
model.eval()
snake_case : List[Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Optional[Any]:
snake_case : List[Any] = NystromformerForQuestionAnswering(config=A )
model.to(A )
model.eval()
snake_case : Dict = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Tuple:
snake_case : Optional[int] = self.num_labels
snake_case : Union[str, Any] = NystromformerForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> int:
snake_case : Union[str, Any] = self.num_labels
snake_case : Union[str, Any] = NystromformerForTokenClassification(config=A )
model.to(A )
model.eval()
snake_case : int = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Any:
snake_case : List[Any] = self.num_choices
snake_case : Union[str, Any] = NystromformerForMultipleChoice(config=A )
model.to(A )
model.eval()
snake_case : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : List[str] = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Tuple:
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_snake_case = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ) -> str:
self.model_tester = NystromformerModelTester(self )
self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=3_7 )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Any:
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : int = type
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> Dict:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Any = NystromformerModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> Dict:
snake_case : Optional[int] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
snake_case : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
snake_case : Optional[int] = model(A )[0]
snake_case : List[Any] = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , A )
snake_case : Union[str, Any] = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Union[str, Any] = """the [MASK] of Belgium is Brussels"""
snake_case : Any = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
snake_case : Union[str, Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
snake_case : int = tokenizer(A , return_tensors="""pt""" )
with torch.no_grad():
snake_case : Optional[int] = model(encoding.input_ids ).logits
snake_case : str = token_logits[:, 2, :].argmax(-1 )[0]
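        # Index 2 picks the [MASK] position: BERT-style tokenizers prepend [CLS]
        # at 0, so "the" sits at 1 and [MASK] at 2 (an assumption about this
        # checkpoint's tokenizer, consistent with the expected answer "capital").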
self.assertEqual(tokenizer.decode(A ) , """capital""" )
| 124 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ :
def __init__( self : int , snake_case__ : List[str] , snake_case__ : Union[str, Any]=13 , snake_case__ : Optional[int]=32 , snake_case__ : int=3 , snake_case__ : List[str]=4 , snake_case__ : Optional[int]=[10, 20, 30, 40] , snake_case__ : Tuple=[2, 2, 3, 2] , snake_case__ : Dict=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=37 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=10 , snake_case__ : str=0.02 , snake_case__ : str=["stage2", "stage3", "stage4"] , snake_case__ : List[str]=[2, 3, 4] , snake_case__ : List[Any]=None , ):
lowerCamelCase_ : Dict =parent
lowerCamelCase_ : List[str] =batch_size
lowerCamelCase_ : str =image_size
lowerCamelCase_ : Tuple =num_channels
lowerCamelCase_ : List[str] =num_stages
lowerCamelCase_ : Optional[Any] =hidden_sizes
lowerCamelCase_ : str =depths
lowerCamelCase_ : int =is_training
lowerCamelCase_ : Optional[int] =use_labels
lowerCamelCase_ : Tuple =intermediate_size
lowerCamelCase_ : List[str] =hidden_act
lowerCamelCase_ : Union[str, Any] =num_labels
lowerCamelCase_ : Union[str, Any] =initializer_range
lowerCamelCase_ : Union[str, Any] =out_features
lowerCamelCase_ : List[str] =out_indices
lowerCamelCase_ : Union[str, Any] =scope
def UpperCAmelCase__ ( self : str ):
        pixel_values : Union[str, Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels : Dict =None
        if self.use_labels:
            labels : Union[str, Any] =ids_tensor([self.batch_size] , self.num_labels )
        config : Optional[Any] =self.get_config()
        return config, pixel_values, labels
def UpperCAmelCase__ ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : int ):
lowerCamelCase_ : Any =ConvNextVaModel(config=_a )
model.to(_a )
model.eval()
lowerCamelCase_ : Union[str, Any] =model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self : Any , snake_case__ : Any , snake_case__ : str , snake_case__ : Optional[Any] ):
lowerCamelCase_ : Tuple =ConvNextVaForImageClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase_ : Optional[Any] =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Dict , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[Any] ):
lowerCamelCase_ : int =ConvNextVaBackbone(config=_a )
model.to(_a )
model.eval()
lowerCamelCase_ : Any =model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase_ : str =None
lowerCamelCase_ : Optional[Any] =ConvNextVaBackbone(config=_a )
model.to(_a )
model.eval()
lowerCamelCase_ : Dict =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase__ ( self : Dict ):
        config_and_inputs : Dict =self.prepare_config_and_inputs()
        config , pixel_values , labels =config_and_inputs
        inputs_dict : List[str] ={'pixel_values': pixel_values}
        return config, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
        config_and_inputs : Dict =self.prepare_config_and_inputs()
        config , pixel_values , labels =config_and_inputs
        inputs_dict : Union[str, Any] ={'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict
@require_torch
class lowercase__ ( lowercase__, lowercase__, unittest.TestCase ):
_UpperCAmelCase :Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Dict = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[int] = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :List[Any] = False
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Any =ConvNextVaModelTester(self )
lowerCamelCase_ : Dict =ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def UpperCAmelCase__ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def UpperCAmelCase__ ( self : int ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def UpperCAmelCase__ ( self : Tuple ):
pass
def UpperCAmelCase__ ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase_ : List[str] =True
if model_class.__name__ in [
*get_values(_a ),
*get_values(_a ),
]:
continue
lowerCamelCase_ : Union[str, Any] =model_class(_a )
model.to(_a )
model.train()
lowerCamelCase_ : Any =self._prepare_for_class(_a , _a , return_labels=_a )
lowerCamelCase_ : Optional[int] =model(**_a ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase_ : Tuple =False
lowerCamelCase_ : int =True
if (
model_class.__name__
in [*get_values(_a ), *get_values(_a )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCamelCase_ : List[Any] =model_class(_a )
model.to(_a )
model.gradient_checkpointing_enable()
model.train()
lowerCamelCase_ : Any =self._prepare_for_class(_a , _a , return_labels=_a )
lowerCamelCase_ : str =model(**_a ).loss
loss.backward()
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[int] =model_class(_a )
lowerCamelCase_ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Any =[*signature.parameters.keys()]
lowerCamelCase_ : Optional[Any] =['pixel_values']
self.assertListEqual(arg_names[:1] , _a )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def UpperCAmelCase__ ( self : Any ):
def check_hidden_states_output(snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[Any] ):
lowerCamelCase_ : Any =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCamelCase_ : int =model(**self._prepare_for_class(_a , _a ) )
lowerCamelCase_ : Any =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ : Tuple =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Any =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Any =True
check_hidden_states_output(_a , _a , _a )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def UpperCAmelCase__ ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : Optional[int] =ConvNextVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
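# Loads the standard COCO fixture image used by the integration tests below.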
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : str =ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(_a )
lowerCamelCase_ : Any =self.default_image_processor
lowerCamelCase_ : Dict =prepare_img()
lowerCamelCase_ : Tuple =preprocessor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
lowerCamelCase_ : str =model(**_a )
# verify the logits
lowerCamelCase_ : int =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
lowerCamelCase_ : int =torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
| 355 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one( i ): # picklable for multiprocessing
    return i + 1
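# "spark" must register as the active parallel backend; unsupported backend names should raise a ValueError.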
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_spark( ):
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def test_map_nested_with_parallel_backend( num_proc ):
    sa = [1, 2]
    sb = {"a": 1, "b": 2}
    sc = {"a": [1, 2], "b": [3, 4]}
    sd = {"a": {"1": 1}, "b": 2}
    se = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_sa = [2, 3]
    expected_map_nested_sb = {"a": 2, "b": 3}
    expected_map_nested_sc = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_sd = {"a": {"1": 2}, "b": 3}
    expected_map_nested_se = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(add_one , sa , num_proc=num_proc ) == expected_map_nested_sa
        assert map_nested(add_one , sb , num_proc=num_proc ) == expected_map_nested_sb
        assert map_nested(add_one , sc , num_proc=num_proc ) == expected_map_nested_sc
        assert map_nested(add_one , sd , num_proc=num_proc ) == expected_map_nested_sd
        assert map_nested(add_one , se , num_proc=num_proc ) == expected_map_nested_se
| 209 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
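# Model tester: builds a tiny VideoMAE config plus random video tensors and checks output shapes, including the pretraining head.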
class lowercase__ :
def __init__( self : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[Any]=13 ,lowerCamelCase__ : Dict=10 ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : List[Any]=2 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Union[str, Any]=32 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : int=4 ,lowerCamelCase__ : Tuple=37 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : Union[str, Any]=10 ,lowerCamelCase__ : Any=0.0_2 ,lowerCamelCase__ : int=0.9 ,lowerCamelCase__ : Optional[int]=None ,):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = parent
_UpperCamelCase : Any = batch_size
_UpperCamelCase : Union[str, Any] = image_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : Any = patch_size
_UpperCamelCase : str = tubelet_size
_UpperCamelCase : Dict = num_frames
_UpperCamelCase : str = is_training
_UpperCamelCase : str = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Any = type_sequence_label_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[str] = mask_ratio
_UpperCamelCase : Any = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_UpperCamelCase : int = (image_size // patch_size) ** 2
_UpperCamelCase : int = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_UpperCamelCase : List[Any] = int(mask_ratio * self.seq_length )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
        pixel_values : Any = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels : List[Any] = None
        if self.use_labels:
            labels : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        config : Optional[int] = self.get_config()
        return config, pixel_values, labels
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : List[str] = VideoMAEModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCamelCase : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : List[str] = VideoMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCamelCase : Union[str, Any] = torch.ones((self.num_masks,) )
_UpperCamelCase : Any = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_UpperCamelCase : Optional[int] = mask.expand(self.batch_size ,-1 ).bool()
_UpperCamelCase : List[str] = model(lowerCamelCase__ ,lowerCamelCase__ )
# model only returns predictions for masked patches
_UpperCamelCase : Optional[int] = mask.sum().item()
_UpperCamelCase : List[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
        config_and_inputs : str = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict : Tuple = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase__ ( lowercase , lowercase , unittest.TestCase ):
lowercase__ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowercase__ = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Tuple = VideoMAEModelTester(self )
_UpperCamelCase : str = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
_UpperCamelCase : List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_UpperCamelCase : List[Any] = torch.ones((self.model_tester.num_masks,) )
_UpperCamelCase : int = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_UpperCamelCase : List[Any] = mask.expand(self.model_tester.batch_size ,-1 ).bool()
_UpperCamelCase : int = bool_masked_pos.to(lowerCamelCase__ )
if return_labels:
if model_class in [
*get_values(lowerCamelCase__ ),
]:
_UpperCamelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase__ )
return inputs_dict
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_UpperCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = model_class(lowerCamelCase__ )
_UpperCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Any = [*signature.parameters.keys()]
_UpperCamelCase : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : str = VideoMAEModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = True
for model_class in self.all_model_classes:
_UpperCamelCase : int = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCamelCase : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = True
_UpperCamelCase : Tuple = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
_UpperCamelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : Any = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
_UpperCamelCase : Optional[int] = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
_UpperCamelCase : int = len(lowerCamelCase__ )
# Check attention is always last and order is fine
_UpperCamelCase : Tuple = True
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(out_len + 1 ,len(lowerCamelCase__ ) )
_UpperCamelCase : List[str] = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ):
_UpperCamelCase : Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : Dict = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
_UpperCamelCase : Optional[int] = outputs.hidden_states
_UpperCamelCase : Optional[int] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
_UpperCamelCase : Tuple = self.model_tester.seq_length - self.model_tester.num_masks
_UpperCamelCase : int = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : str = True
check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[Any] = True
check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
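# Downloads the spaghetti test video from the Hub and returns it as a list of frames.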
def prepare_video( ):
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : int = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = self.default_image_processor
_UpperCamelCase : int = prepare_video()
_UpperCamelCase : int = image_processor(lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**lowerCamelCase__ )
# verify the logits
_UpperCamelCase : Optional[int] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(lowerCamelCase__ )
_UpperCamelCase : List[str] = self.default_image_processor
_UpperCamelCase : str = prepare_video()
_UpperCamelCase : int = image_processor(lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# add boolean mask, indicating which patches to mask
_UpperCamelCase : Optional[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' ,filename='bool_masked_pos.pt' )
_UpperCamelCase : Optional[Any] = torch.load(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCamelCase : str = model(**lowerCamelCase__ )
# verify the logits
_UpperCamelCase : Tuple = torch.Size([1, 1408, 1536] )
_UpperCamelCase : Optional[int] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ,device=lowerCamelCase__ )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,lowerCamelCase__ ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_UpperCamelCase : List[Any] = torch.tensor([0.5_1_4_2] ,device=lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss ,lowerCamelCase__ ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_UpperCamelCase : Optional[Any] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ,norm_pix_loss=lowerCamelCase__ ).to(
lowerCamelCase__ )
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**lowerCamelCase__ )
        _UpperCamelCase : Optional[Any] = torch.tensor([0.6_4_6_9] ,device=lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss ,lowerCamelCase__ ,atol=1E-4 ) )
| 83 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
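# Iterates over open diffusers issues, closing or labeling stale ones unless they carry one of the exempt labels above.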
def main( ):
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 83 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
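# Image processor implementing the usual resize -> center-crop -> rescale -> normalize pipeline for PIL/numpy/tensor inputs.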
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Optional[Any] = ["""pixel_values"""]
def __init__( self , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = PIL.Image.BICUBIC , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = 1 / 255 , UpperCAmelCase__ = True , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
super().__init__(**UpperCAmelCase__ )
A__ = size if size is not None else {"height": 256, "width": 256}
A__ = get_size_dict(UpperCAmelCase__ )
A__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
A__ = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = PIL.Image.BICUBIC , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
A__ = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
UpperCAmelCase__ , size=(size["height"], size["width"]) , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
A__ = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(UpperCAmelCase__ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__=None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = ChannelDimension.FIRST , **UpperCAmelCase__ , ):
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(UpperCAmelCase__ )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
A__ = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
A__ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
A__ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
A__ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
A__ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
A__ = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
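# Illustrative usage (defaults shown; not tied to a released checkpoint):
#   processor = UpperCamelCase()
#   batch = processor(images=pil_image, return_tensors="pt")  # BatchFeature holding a "pixel_values" tensor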
| 198 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
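# Convenience wrapper: checks a single pinned dependency at runtime; `hint` is surfaced in the error message on failure.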
def UpperCamelCase ( pkg , hint=None ):
    """simple docstring"""
    require_version(deps[pkg] , hint )
| 198 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
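# Writes each split of a WMT-style translation dataset to parallel <split>.source / <split>.target text files.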
def download_wmt_dataset( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ):
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("""run pip install datasets""" )
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}" )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records" )
        # to save to val.source, val.target like summary datasets
        fn = """val""" if split == """validation""" else split
        src_path = save_dir.joinpath(f"{fn}.source" )
        tgt_path = save_dir.joinpath(f"{fn}.target" )
        src_fp = src_path.open("""w+""" )
        tgt_fp = tgt_path.open("""w+""" )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x["""translation"""]
            src_fp.write(ex[src_lang] + """\n""" )
            tgt_fp.write(ex[tgt_lang] + """\n""" )
    print(f"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 100 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
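# Model tester: builds a tiny MaskFormerSwin config with random inputs and verifies model and backbone output contracts.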
class UpperCAmelCase :
def __init__(self : Dict , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Any=32 , snake_case__ : Optional[Any]=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : List[Any]=16 , snake_case__ : int=[1, 2, 1] , snake_case__ : Dict=[2, 2, 4] , snake_case__ : Dict=2 , snake_case__ : Tuple=2.0 , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.02 , snake_case__ : int=1e-5 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=10 , snake_case__ : Optional[Any]=8 , snake_case__ : Any=["stage1", "stage2", "stage3"] , snake_case__ : Tuple=[1, 2, 3] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = parent
snake_case : Optional[int] = batch_size
snake_case : Union[str, Any] = image_size
snake_case : Dict = patch_size
snake_case : Optional[Any] = num_channels
snake_case : Union[str, Any] = embed_dim
snake_case : int = depths
snake_case : List[str] = num_heads
snake_case : Union[str, Any] = window_size
snake_case : Union[str, Any] = mlp_ratio
snake_case : List[Any] = qkv_bias
snake_case : List[Any] = hidden_dropout_prob
snake_case : Union[str, Any] = attention_probs_dropout_prob
snake_case : Union[str, Any] = drop_path_rate
snake_case : int = hidden_act
snake_case : Optional[int] = use_absolute_embeddings
snake_case : int = patch_norm
snake_case : Union[str, Any] = layer_norm_eps
snake_case : Any = initializer_range
snake_case : Optional[Any] = is_training
snake_case : Tuple = scope
snake_case : Optional[int] = use_labels
snake_case : Optional[Any] = type_sequence_label_size
snake_case : Union[str, Any] = encoder_stride
snake_case : Any = out_features
snake_case : Tuple = out_indices
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
        pixel_values : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels : int = None
        if self.use_labels:
            labels : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config : Dict = self.get_config()
        return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int:
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = MaskFormerSwinModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : List[Any] = model(snake_case__ )
snake_case : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ) -> str:
'''simple docstring'''
snake_case : Optional[int] = MaskFormerSwinBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : List[Any] = model(snake_case__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(snake_case__ ):
snake_case : Tuple = ["stem"]
snake_case : List[Any] = MaskFormerSwinBackbone(config=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
        config_and_inputs : Union[str, Any] = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict : int = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ):
A__ : List[str] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
A__ : str = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
A__ : Optional[Any] = False
A__ : List[Any] = False
A__ : List[str] = False
A__ : List[str] = False
A__ : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : str = MaskFormerSwinModelTester(self )
snake_case : Optional[int] = ConfigTester(self , config_class=snake_case__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]:
'''simple docstring'''
return
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case__ )
@unittest.skip("Swin does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case , snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : str = model_class(snake_case__ )
snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Optional[Any] = [*signature.parameters.keys()]
snake_case : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
snake_case : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
snake_case : int = outputs.hidden_states
snake_case : Union[str, Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# Swin has a different seq_length
snake_case : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case : int = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Dict = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Any = 3
snake_case : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case : str = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Optional[Any] = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
                                f""" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"""
                                f""" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."""
                            ) , )
                recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
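# Backbone-specific suite: validates feature maps, channels, and hidden/attention state outputs for MaskFormerSwinBackbone.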
@require_torch
class UpperCAmelCase ( unittest.TestCase ,A_ ):
A__ : int = (MaskFormerSwinBackbone,) if is_torch_available() else ()
A__ : int = MaskFormerSwinConfig
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = MaskFormerSwinModelTester(self )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[int] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
snake_case : Optional[int] = backbone_class(snake_case__ )
backbone.to(snake_case__ )
backbone.eval()
snake_case : Union[str, Any] = backbone(**snake_case__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , snake_case__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
snake_case : Optional[int] = backbone(**snake_case__ , output_hidden_states=snake_case__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
snake_case , snake_case , snake_case : Dict = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
snake_case : Optional[Any] = backbone(**snake_case__ , output_attentions=snake_case__ )
self.assertIsNotNone(outputs.attentions )
| 59 | 0 |
from ..utils import DummyObject, requires_backends
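# Dummy placeholders: each class raises a helpful error via requires_backends when torch is not installed, instead of failing at import time.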
class __A( metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
def __magic_name__ ( *__a : List[Any] , **__a : int ):
'''simple docstring'''
requires_backends(__a , ["""torch"""] )
def __magic_name__ ( *__a : int , **__a : Any ):
'''simple docstring'''
requires_backends(__a , ["""torch"""] )
def __magic_name__ ( *__a : List[str] , **__a : str ):
'''simple docstring'''
requires_backends(__a , ["""torch"""] )
def __magic_name__ ( *__a : Tuple , **__a : List[Any] ):
'''simple docstring'''
requires_backends(__a , ["""torch"""] )
def __magic_name__ ( *__a : str , **__a : Any ):
'''simple docstring'''
requires_backends(__a , ["""torch"""] )
def __magic_name__ ( *__a : List[Any] , **__a : str ):
'''simple docstring'''
requires_backends(__a , ["""torch"""] )
def __magic_name__ ( *__a : Optional[int] , **__a : Optional[int] ):
'''simple docstring'''
requires_backends(__a , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
class __A( metaclass=__lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torch"""]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ["""torch"""] )
| 352 |
import collections
import os
import re
from pathlib import Path
lowerCamelCase_ = '''src/transformers'''
# Matches is_xxx_available()
lowerCamelCase_ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowerCamelCase_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowerCamelCase_ = re.compile(r'''^\s*try:''')
# Catches a line with else:
lowerCamelCase_ = re.compile(r'''^\s*else:''')
def __magic_name__ ( __a : List[Any] ):
'''simple docstring'''
if _re_test_backend.search(__a ) is None:
return None
UpperCamelCase__ = [b[0] for b in _re_backend.findall(__a )]
backends.sort()
return "_and_".join(__a )
def __magic_name__ ( __a : Dict ):
'''simple docstring'''
with open(__a , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = 0
while line_index < len(__a ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__a ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCamelCase__ = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
UpperCamelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__a ):
UpperCamelCase__ = _re_one_line_import_struct.search(__a ).groups()[0]
UpperCamelCase__ = re.findall(R"""\[([^\]]+)\]""" , __a )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
UpperCamelCase__ = _re_import_struct_key_value.search(__a )
if single_line_import_search is not None:
UpperCamelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__a ) > 0]
objects.extend(__a )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
UpperCamelCase__ = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCamelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
UpperCamelCase__ = lines[line_index]
if _re_import_struct_add_one.search(__a ) is not None:
objects.append(_re_import_struct_add_one.search(__a ).groups()[0] )
elif _re_import_struct_add_many.search(__a ) is not None:
UpperCamelCase__ = _re_import_struct_add_many.search(__a ).groups()[0].split(""", """ )
UpperCamelCase__ = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_between_brackets.search(__a ) is not None:
UpperCamelCase__ = _re_between_brackets.search(__a ).groups()[0].split(""", """ )
UpperCamelCase__ = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_quote_object.search(__a ) is not None:
objects.append(_re_quote_object.search(__a ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
UpperCamelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCamelCase__ = []
while (
line_index < len(__a )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCamelCase__ = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(__a ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCamelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCamelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
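# Both returned dicts map a backend name ("none" for unconditional imports) to
# the object names found there, e.g. (illustrative):
#   {"none": ["BertConfig"], "torch": ["BertModel"]}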
def __magic_name__ ( __a : Dict , __a : Dict ):
'''simple docstring'''
def find_duplicates(__a : Optional[Any] ):
return [k for k, v in collections.Counter(__a ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase__ = []
for key in import_dict_objects.keys():
UpperCamelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
UpperCamelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCamelCase__ = """base imports""" if key == """none""" else f"{key} backend"
errors.append(f"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT." )
return errors
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = []
for root, _, files in os.walk(__a ):
if "__init__.py" in files:
UpperCamelCase__ = os.path.join(__a , """__init__.py""" )
UpperCamelCase__ = parse_init(__a )
if objects is not None:
UpperCamelCase__ = analyze_results(*__a )
if len(__a ) > 0:
UpperCamelCase__ = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("""\n""".join(__a ) )
if len(__a ) > 0:
raise ValueError("""\n\n""".join(__a ) )
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = []
for path, directories, files in os.walk(__a ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(__a )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__a ) / folder).glob("""*.py""" ) ) ) == 0:
continue
UpperCamelCase__ = str((Path(__a ) / folder).relative_to(__a ) )
UpperCamelCase__ = short_path.replace(os.path.sep , """.""" )
submodules.append(__a )
for fname in files:
if fname == "__init__.py":
continue
UpperCamelCase__ = str((Path(__a ) / fname).relative_to(__a ) )
UpperCamelCase__ = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(__a )
return submodules
lowerCamelCase_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def __magic_name__ ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
UpperCamelCase__ = direct_transformers_import(__a )
UpperCamelCase__ = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is
    # missing some optional dependencies, they may not have all of them. Thus we read the init to collect all
    # additions and (potentially re-)add them.
with open(os.path.join(__a , """__init__.py""" ) , """r""" ) as f:
UpperCamelCase__ = f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , __a ) ) )
UpperCamelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__a ) > 0:
UpperCamelCase__ = """\n""".join(f"- {module}" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
f"{list_of_modules}\n"
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 178 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Any , _A : str , _A : Optional[Any]=13 , _A : Union[str, Any]=10 , _A : Dict=3 , _A : Union[str, Any]=2 , _A : List[Any]=2 , _A : List[str]=2 , _A : List[str]=True , _A : Dict=True , _A : Tuple=32 , _A : Tuple=5 , _A : Optional[int]=4 , _A : Dict=37 , _A : List[Any]="gelu" , _A : Any=0.1 , _A : Union[str, Any]=0.1 , _A : Dict=10 , _A : int=0.0_2 , _A : int=0.9 , _A : Tuple=None , ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[Any] = parent
snake_case_ : str = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : List[Any] = num_channels
snake_case_ : List[str] = patch_size
snake_case_ : Tuple = tubelet_size
snake_case_ : List[Any] = num_frames
snake_case_ : Optional[Any] = is_training
snake_case_ : int = use_labels
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Any = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[str] = mask_ratio
snake_case_ : Union[str, Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
snake_case_ : List[Any] = (image_size // patch_size) ** 2
snake_case_ : Optional[int] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
snake_case_ : Optional[int] = int(mask_ratio * self.seq_length )
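        # with the defaults above: num_patches_per_frame = (10 // 2) ** 2 = 25,
        # seq_length = (2 // 2) * 25 = 25 and num_masks = int(0.9 * 25) = 22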
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Dict = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case_ : int = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Optional[Any] ) -> Any:
"""simple docstring"""
snake_case_ : List[Any] = VideoMAEModel(config=_A )
model.to(_A )
model.eval()
snake_case_ : str = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Union[str, Any] , _A : int , _A : List[Any] , _A : List[Any] ) -> int:
"""simple docstring"""
snake_case_ : List[str] = VideoMAEForPreTraining(_A )
model.to(_A )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case_ : Dict = torch.ones((self.num_masks,) )
snake_case_ : List[Any] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
snake_case_ : Tuple = mask.expand(self.batch_size , -1 ).bool()
snake_case_ : int = model(_A , _A )
# model only returns predictions for masked patches
snake_case_ : Optional[int] = mask.sum().item()
snake_case_ : List[Any] = 3 * self.tubelet_size * self.patch_size**2
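        # with the defaults: decoder_num_labels = 3 * 2 * 2**2 = 24 predicted values per masked patch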
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = self.prepare_config_and_inputs()
snake_case_ ,snake_case_ ,snake_case_ : List[str] = config_and_inputs
snake_case_ : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ):
__magic_name__: Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__magic_name__: Optional[Any] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__magic_name__: List[str] = False
__magic_name__: Tuple = False
__magic_name__: Dict = False
__magic_name__: List[Any] = False
def UpperCAmelCase_ ( self : Dict ) -> str:
"""simple docstring"""
snake_case_ : List[str] = VideoMAEModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase_ ( self : Optional[Any] , _A : List[str] , _A : Any , _A : int=False ) -> Tuple:
"""simple docstring"""
snake_case_ : Union[str, Any] = copy.deepcopy(_A )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case_ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
snake_case_ : Union[str, Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
snake_case_ : List[str] = mask.expand(self.model_tester.batch_size , -1 ).bool()
snake_case_ : Optional[Any] = bool_masked_pos.to(_A )
if return_labels:
if model_class in [
*get_values(_A ),
]:
snake_case_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def UpperCAmelCase_ ( self : str ) -> List[Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(_A )
snake_case_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[str] = [*signature.parameters.keys()]
snake_case_ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
@slow
def UpperCAmelCase_ ( self : str ) -> int:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Union[str, Any] = VideoMAEModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCAmelCase_ ( self : int ) -> List[str]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
snake_case_ ,snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[Any] = True
for model_class in self.all_model_classes:
snake_case_ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case_ : Union[str, Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
snake_case_ : Any = True
snake_case_ : Tuple = False
snake_case_ : Optional[Any] = True
snake_case_ : Optional[int] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
snake_case_ : Optional[Any] = model(**self._prepare_for_class(_A , _A ) )
snake_case_ : List[str] = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : List[Any] = True
snake_case_ : Optional[Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(**self._prepare_for_class(_A , _A ) )
snake_case_ : Optional[int] = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case_ : Tuple = len(_A )
# Check attention is always last and order is fine
snake_case_ : Any = True
snake_case_ : int = True
snake_case_ : Tuple = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
snake_case_ : int = model(**self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + 1 , len(_A ) )
snake_case_ : Union[str, Any] = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(_A : Tuple , _A : int , _A : str ):
snake_case_ : List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
snake_case_ : Any = model(**self._prepare_for_class(_A , _A ) )
snake_case_ : List[Any] = outputs.hidden_states
snake_case_ : Optional[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_A ) , _A )
snake_case_ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case_ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case_ ,snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[str] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : Union[str, Any] = True
check_hidden_states_output(_A , _A , _A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : List[str] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
snake_case_ : List[str] = np.load(__a )
return list(__a )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self : List[str] ) -> int:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
snake_case_ : Any = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
_A )
snake_case_ : List[Any] = self.default_image_processor
snake_case_ : Optional[int] = prepare_video()
snake_case_ : Optional[int] = image_processor(_A , return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**_A )
# verify the logits
snake_case_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _A )
snake_case_ : Optional[int] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ : Union[str, Any] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(_A )
snake_case_ : Optional[Any] = self.default_image_processor
snake_case_ : List[str] = prepare_video()
snake_case_ : List[Any] = image_processor(_A , return_tensors='pt' ).to(_A )
# add boolean mask, indicating which patches to mask
snake_case_ : int = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
snake_case_ : Dict = torch.load(_A )
# forward pass
with torch.no_grad():
snake_case_ : int = model(**_A )
# verify the logits
snake_case_ : str = torch.Size([1, 1408, 1536] )
snake_case_ : Optional[Any] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=_A )
self.assertEqual(outputs.logits.shape , _A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
snake_case_ : List[str] = torch.tensor([0.5_1_4_2] , device=_A )
self.assertTrue(torch.allclose(outputs.loss , _A , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
snake_case_ : Dict = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=_A ).to(
_A )
with torch.no_grad():
snake_case_ : List[str] = model(**_A )
        snake_case_ : Optional[Any] = torch.tensor([0.6_4_6_9] , device=_A )
self.assertTrue(torch.allclose(outputs.loss , _A , atol=1E-4 ) )
| 327 |
from typing import Dict
from .base import GenericTensor, Pipeline
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def UpperCAmelCase_ ( self : str , _A : Optional[Any]=None , _A : List[str]=None , _A : Optional[Any]=None , **_A : List[str] ) -> Any:
"""simple docstring"""
if tokenize_kwargs is None:
snake_case_ : Optional[Any] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
snake_case_ : int = truncation
snake_case_ : Optional[int] = tokenize_kwargs
snake_case_ : Dict = {}
if return_tensors is not None:
snake_case_ : Union[str, Any] = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCAmelCase_ ( self : Optional[int] , _A : int , **_A : Any ) -> Dict[str, GenericTensor]:
"""simple docstring"""
snake_case_ : Dict = self.framework
snake_case_ : Any = self.tokenizer(_A , return_tensors=_A , **_A )
return model_inputs
def UpperCAmelCase_ ( self : Optional[Any] , _A : List[str] ) -> int:
"""simple docstring"""
snake_case_ : Tuple = self.model(**_A )
return model_outputs
def UpperCAmelCase_ ( self : Union[str, Any] , _A : str , _A : str=False ) -> Any:
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : List[str] , *_A : Union[str, Any] , **_A : Tuple ) -> List[str]:
"""simple docstring"""
return super().__call__(*_A , **_A )
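# Usage sketch (the checkpoint name is an illustrative assumption):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   extractor("This is a test")  # nested list, roughly [1, num_tokens, hidden_size]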
| 327 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__UpperCamelCase : Dict = logging.get_logger(__name__)
class a ( snake_case__ ):
snake_case__ = ["""pixel_values"""]
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = None , _snake_case = True , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
super().__init__(**_A )
lowerCAmelCase = size if size is not None else {'shortest_edge': 2_56}
lowerCAmelCase = get_size_dict(_A , default_to_square=_A )
lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase = get_size_dict(_A , param_name='crop_size' )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BICUBIC , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase = get_resize_output_image_size(_A , size=size['shortest_edge'] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case ):
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(_A , default_to_square=_A )
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(_A , param_name='crop_size' )
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(_A , _A ) for image in images]
lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_A ):
lowerCAmelCase = target_sizes.numpy()
lowerCAmelCase = []
for idx in range(len(_A ) ):
lowerCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_A )
lowerCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
lowerCAmelCase = logits.argmax(dim=1 )
lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
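# post_process_semantic_segmentation usage sketch (shapes are illustrative):
#   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
#   maps[0]  # LongTensor of shape (512, 512) holding one class id per pixel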
| 364 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        """simple docstring"""
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 2_56
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        """simple docstring"""
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 2_56, [0, 2_56], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        """simple docstring"""
        plt.hist(self.img.ravel(), 2_56, [0, 2_56])

    def show_image(self):
        """simple docstring"""
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(50_00)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309 | 0 |
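# A compact NumPy-only sketch (an addition, not from the original file) of the
# cumulative-distribution mapping that ConstantStretch.stretch builds with
# prk/sk: each gray level is sent to (L - 1) * CDF(level).
import numpy as np

def equalize(img, levels=256):
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = np.cumsum(hist) / img.size                       # running sum of p(r_k), i.e. sk
    lut = np.round((levels - 1) * cdf).astype(img.dtype)   # plays the role of last_list
    return lut[img]                                        # remap every pixel through the table

demo = np.random.randint(0, 64, (8, 8), dtype=np.uint8)    # dark, low-contrast image
print(equalize(demo).max())                                # stretched toward the full 0..255 range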
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''',)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, '''w''', encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=False,)
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,)
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 235 |
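# A toy sketch (added here, not part of the conversion script) of the `*`
# wildcard trick in MAPPING above: the layer index is recovered from the
# fairseq key and substituted into the Hugging Face key template, exactly as
# recursively_load_weights does.
fairseq_keys = ['encoder.layers.0.fc1.weight', 'encoder.layers.7.fc1.bias']
template = 'encoder.layers.*.feed_forward.intermediate_dense'

for name in fairseq_keys:
    # the index sits two dots before the matched suffix
    layer_index = name.split('fc1')[0].split('.')[-2]
    print(template.replace('*', layer_index))
# encoder.layers.0.feed_forward.intermediate_dense
# encoder.layers.7.feed_forward.intermediate_dense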
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # A root can only be bracketed when the function changes sign on [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError('''Wrong space!''' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 235 | 1 |
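# A short addition (not in the original file): bisection halves the bracket on
# every pass, so reaching tolerance `tol` from an interval of width (b - a)
# takes about log2((b - a) / tol) iterations. The numbers below use the
# hard-coded tol of 0.01 from bisection() above.
import math

def max_iterations(a: float, b: float, tol: float = 0.01) -> int:
    return math.ceil(math.log2((b - a) / tol))

print(max_iterations(-2, 5))  # ~10 halvings for the first doctest call
print(max_iterations(0, 6))   # ~10 for the second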
class Graph:
    '''simple docstring'''

    def __init__(self):
        """simple docstring"""
        self.vertex = {}

    def print_graph(self):
        """simple docstring"""
        print(self.vertex)
        for i in self.vertex:
            print(i, ''' -> ''', ''' -> '''.join([str(j) for j in self.vertex[i]] ) )

    def add_edge(self, from_vertex, to_vertex):
        """simple docstring"""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """simple docstring"""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex) ):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        """simple docstring"""
        visited[start_vertex] = True
        print(start_vertex, end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
__UpperCAmelCase : Union[str, Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 360 |
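# An added sketch: the same traversal as Graph.dfs above, but with an explicit
# stack instead of recursion, which sidesteps Python's recursion limit on deep
# graphs. `graph` mirrors the adjacency dict that add_edge builds.
def dfs_iterative(graph, start):
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbours in reverse so they pop in insertion order
        stack.extend(reversed(graph.get(vertex, [])))
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]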
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True,):
        """simple docstring"""
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing, '''size''' ) )
        self.assertTrue(hasattr(image_processing, '''apply_ocr''' ) )
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} )
    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
    def test_layoutlmv3_integration_test(self):
        """simple docstring"""
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' )
        image = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        encoding = image_processing(image, return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        expected_boxes = UpperCamelCase  # capture the box list under its own name
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
| 315 | 0 |
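# An added sketch of what `prepare_image_inputs` is assumed to produce for the
# tests above: a batch of random PIL images whose sides fall between the
# tester's min_resolution and max_resolution (equal per image when
# equal_resolution=True). All names here are illustrative.
import numpy as np
from PIL import Image

def make_dummy_images(batch_size=7, num_channels=3, min_res=30, max_res=400, equal_resolution=False):
    images = []
    for _ in range(batch_size):
        if equal_resolution:
            w = h = np.random.randint(min_res, max_res + 1)
        else:
            w, h = np.random.randint(min_res, max_res + 1, size=2)
        arr = np.random.randint(0, 256, (h, w, num_channels), dtype=np.uint8)
        images.append(Image.fromarray(arr))
    return images

print(len(make_dummy_images()), make_dummy_images()[0].mode)  # 7 RGB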
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """simple docstring"""

    def __init__(self, group=14):
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(32 ) ), base=16 )

    def get_private_key(self):
        return hex(self.__private_key )[2:]

    def generate_public_key(self):
        public_key = pow(self.generator, self.__private_key, self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key(self, key):
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime ) == 1
        )

    def generate_shared_key(self, other_key_str):
        other_key = int(other_key_str, base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key, self.__private_key, self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str, prime):
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str, remote_public_key_str, group=14):
        local_private_key = int(local_private_key_str, base=16 )
        remote_public_key = int(remote_public_key_str, base=16 )
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime ):
            raise ValueError('Invalid public key' )
        shared_key = pow(remote_public_key, local_private_key, prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 |
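# An added usage sketch for the DiffieHellman class above: two parties exchange
# public keys and independently derive the same shared secret.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)

alice_public = alice.generate_public_key()
bob_public = bob.generate_public_key()

# each side combines its own private key with the other's public key
assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)
print("shared secret established")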
import cv2
import numpy as np
class HarrisCorner:
    """simple docstring"""

    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value" )

    def __str__(self):
        return str(self.k )

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0 )
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset ):
            for x in range(offset, w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0), 0 )
                    color_img.itemset((y, x, 1), 0 )
                    color_img.itemset((y, x, 2), 255 )
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
| 312 | 0 |
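# An added NumPy-only sketch of the Harris response computed in detect() above,
# evaluated at one pixel of a synthetic corner: R = det(M) - k * trace(M)^2
# over a window, large and positive exactly where two edges meet.
import numpy as np

img = np.zeros((9, 9))
img[4:, 4:] = 255.0  # bright quadrant -> corner at (4, 4)
dy, dx = np.gradient(img)
ixx, iyy, ixy = dx * dx, dy * dy, dx * dy

y = x = 4
off = 1  # window_size 3 -> offset 1
wxx = ixx[y - off : y + off + 1, x - off : x + off + 1].sum()
wyy = iyy[y - off : y + off + 1, x - off : x + off + 1].sum()
wxy = ixy[y - off : y + off + 1, x - off : x + off + 1].sum()
r = (wxx * wyy - wxy**2) - 0.04 * (wxx + wyy) ** 2
print(r > 0.5)  # True: the response fires at the corner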
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    required_optional_params = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False
    # property names restored from the values they return and the references
    # below; the two unreferenced names (time_embed_dim, cross_attention_dim)
    # follow the upstream test file and are inferred
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32

    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32

    @property
    def block_out_channels_a(self):
        """simple docstring"""
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 1_00
    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs(self):
        """simple docstring"""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components(self):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            '''num_train_timesteps''': 10_00,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00_085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((2_56, 2_56) )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_img2img(self):
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        """simple docstring"""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        prompt = '''A red cartoon frog, 4k'''
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=1_00, height=7_68, width=7_68, strength=0.2, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image, expected_image )
| 264 |
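# An added note in code form: the tests above seed `torch.Generator` so that
# latents are reproducible across runs; the same seed yields the same tensor.
# A minimal sketch (assumes torch is installed):
import torch

g1 = torch.Generator(device='cpu').manual_seed(0)
g2 = torch.Generator(device='cpu').manual_seed(0)
a = torch.randn(2, 2, generator=g1)
b = torch.randn(2, 2, generator=g2)
assert torch.equal(a, b)  # identical draws -> deterministic pipeline inputs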
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    predict_with_generate: bool = field(
        default=False, metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `max_length` value of the model configuration.'''
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `num_beams` value of the model configuration.'''
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
        }, )

    def to_dict(self):
        """simple docstring"""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 264 | 1 |
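# An added self-contained sketch of the `field(default=..., metadata={'help': ...})`
# pattern used above: argument-parser tooling in the HfArgumentParser style reads
# the metadata to build CLI help text, while the dataclass machinery only uses
# the default.
from dataclasses import dataclass, field, fields

@dataclass
class ToyArguments:
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})

for f in fields(ToyArguments):
    print(f.name, f.default, f.metadata['help'])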
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_reformer"""] = ["""ReformerTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_reformer_fast"""] = ["""ReformerTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_reformer"""] = [
        """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ReformerAttention""",
        """ReformerForMaskedLM""",
        """ReformerForQuestionAnswering""",
        """ReformerForSequenceClassification""",
        """ReformerLayer""",
        """ReformerModel""",
        """ReformerModelWithLMHead""",
        """ReformerPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 |
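# An added minimal sketch of the lazy-import idea behind `_LazyModule` above:
# attribute access triggers the real import, so the package stays cheap to
# import until a symbol is actually used. Names here are illustrative only.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # symbol -> submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        module = importlib.import_module(self._symbol_to_module[name])
        return getattr(module, name)

print(LazyModule('m', {'json': ['dumps']}).dumps({'a': 1}))  # imports json on first use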
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''encodec'''

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=2_4000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs, ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
        super().__init__(**kwargs )

    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def num_quantizers(self):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 258 | 0 |
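# An added worked example of the derived properties above. The 48 kHz settings
# (sampling_rate=48_000, chunk_length_s=1.0, overlap=0.01) are assumptions for
# illustration, not read from a real checkpoint.
import math

sampling_rate, chunk_length_s, overlap = 48_000, 1.0, 0.01
upsampling_ratios = [8, 5, 4, 2]

chunk_length = int(chunk_length_s * sampling_rate)          # 48_000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 47_520 samples
hop_length = math.prod(upsampling_ratios)                   # 320
frame_rate = math.ceil(sampling_rate / hop_length)          # 150 frames/s
print(chunk_length, chunk_stride, frame_rate)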
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs ),
            """validation""": dataset["""train"""].select(valid_idxs ),
            """test""": dataset["""validation"""],
        } )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""" )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    '''simple docstring'''
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("""glue""", """mrpc""" )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""", """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["""train"""].num_rows ), datasets["""train"""]["""label"""] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, batch_size, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps, )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0 )
    preds = torch.stack(test_predictions, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds, references=test_references )
    accelerator.print("""Average test metrics from all folds:""", test_metric )
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=UpperCamelCase__ , default=3 , help="""The number of splits to perform across the dataset""" )
lowerCamelCase = parser.parse_args()
lowerCamelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
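
# --- Hedged illustration (editor's addition, not part of the original script) ---
# A minimal, self-contained sketch of the ensembling step above: each fold
# contributes a tensor of raw logits, which are stacked, averaged across folds,
# and argmax-ed into class predictions. The dummy shapes (8 examples, 2 classes)
# and `num_folds` are assumptions for illustration only.
def _ensemble_demo(num_folds: int = 3):
    import torch

    fold_logits = [torch.randn(8, 2) for _ in range(num_folds)]  # one logits tensor per fold
    averaged = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds)  # mean logits across folds
    return averaged.argmax(dim=-1)  # ensembled class predictions, shape (8,)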
| 351 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
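
# --- Hedged illustration (editor's addition, not part of the original __init__) ---
# With the lazy structure above, importing the package is cheap: each submodule
# is only materialised when one of its exported names is first touched, so e.g.
# `from transformers import MobileBertConfig` imports the configuration file
# but leaves the torch/TF modeling files untouched. A minimal demo:
def _lazy_import_demo():
    import importlib

    module = importlib.import_module("transformers")
    config_cls = getattr(module, "MobileBertConfig")  # first access triggers the lazy submodule import
    return config_cls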
| 66 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Build the right XLNet head for `finetuning_task`, load the TF weights, and save a PyTorch checkpoint."""
    # Initialise the PyTorch model configuration
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
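
# --- Hedged usage sketch (editor's addition; the paths below are placeholders, not real files) ---
# The converter can also be driven from Python instead of the CLI:
def _example_conversion():
    convert_xlnet_checkpoint_to_pytorch(
        tf_checkpoint_path="/tmp/xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt",
        bert_config_file="/tmp/xlnet_cased_L-12_H-768_A-12/xlnet_config.json",
        pytorch_dump_folder_path="/tmp/xlnet_pytorch",
        finetuning_task="sts-b",  # any key of GLUE_TASKS_NUM_LABELS, a "squad" task, or None for the LM head
    )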
| 274 |
def prefix_function(input_string: str) -> list:
    """
    KMP prefix function: prefix_result[i] is the length of the longest proper
    prefix of input_string[: i + 1] that is also a suffix of it.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest prefix that also occurs as a suffix somewhere in the string."""
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
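
# --- Worked example (editor's addition) ---
# For "aabcdaabc" the prefix function is [0, 1, 0, 0, 0, 1, 2, 3, 4]: the final 4
# records that the 4-character prefix "aabc" reappears as a suffix, so
# longest_prefix("aabcdaabc") == 4.
def _prefix_function_demo():
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4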
| 274 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """BartPho tokenizer (syllable level), backed by SentencePiece plus a reduced monolingual vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple="<s>" , _SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , _SCREAMING_SNAKE_CASE : List[str]="</s>" , _SCREAMING_SNAKE_CASE : Tuple="<s>" , _SCREAMING_SNAKE_CASE : Optional[Any]="<unk>" , _SCREAMING_SNAKE_CASE : Any="<pad>" , _SCREAMING_SNAKE_CASE : str="<mask>" , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE : Optional[Any] , )-> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ : int = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
lowerCAmelCase__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Optional[int] = vocab_file
lowerCAmelCase__ : Optional[Any] = monolingual_vocab_file
lowerCAmelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCAmelCase__ : List[Any] = {}
lowerCAmelCase__ : List[str] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase__ : Tuple = cnt
cnt += 1
with open(_SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowerCAmelCase__ : List[Any] = line.strip().split()[0]
lowerCAmelCase__ : Optional[int] = len(self.fairseq_tokens_to_ids )
if str(_SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase__ : Optional[int] = len(self.fairseq_tokens_to_ids )
lowerCAmelCase__ : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple )-> List[str]:
lowerCAmelCase__ : List[Any] = self.__dict__.copy()
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] )-> Dict:
lowerCAmelCase__ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase__ : List[str] = {}
lowerCAmelCase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : List[str] = [self.cls_token_id]
lowerCAmelCase__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None , _SCREAMING_SNAKE_CASE : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
lowerCAmelCase__ : Tuple = [self.sep_token_id]
lowerCAmelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__( self : List[str] )-> Optional[Any]:
return len(self.fairseq_ids_to_tokens )
def UpperCAmelCase__( self : List[str] )-> str:
lowerCAmelCase__ : str = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : str )-> List[str]:
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] )-> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] )-> Any:
return self.fairseq_ids_to_tokens[index]
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] )-> Union[str, Any]:
lowerCAmelCase__ : Dict = ''''''.join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ : str = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ : Dict = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCAmelCase__ : Dict = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'{str(_SCREAMING_SNAKE_CASE )} \n' )
return out_vocab_file, out_monolingual_vocab_file
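
# --- Hedged usage sketch (editor's addition; downloads the checkpoint from the Hub) ---
# Typical round trip with the syllable-level checkpoint referenced above:
def _bartpho_demo():
    tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
    ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
    return tokenizer.decode(ids)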
| 211 |
def is_power_of_two(number: int) -> bool:
    """Returns True if `number` is a power of two (note: this also returns True for 0)."""
    if number < 0:
        raise ValueError("number must not be negative")
    # A power of two has exactly one bit set, so clearing the lowest set bit
    # with `number & (number - 1)` leaves zero.
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
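
# --- Worked example (editor's addition) ---
# 8 is 0b1000 and 7 is 0b0111; 8 & 7 == 0, so 8 is a power of two.
# 6 is 0b0110 and 5 is 0b0101; 6 & 5 == 0b0100 != 0, so 6 is not.
def _power_of_two_demo():
    assert is_power_of_two(8) is True
    assert is_power_of_two(6) is False
    assert is_power_of_two(1) is True  # 2**0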
| 211 | 1 |
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent (leading whitespace) of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( lowerCAmelCase_ :str , lowerCAmelCase_ :Optional[int]="" , lowerCAmelCase_ :Optional[int]=None , lowerCAmelCase_ :List[str]=None )->Optional[int]:
'''simple docstring'''
snake_case_ = 0
snake_case_ = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase_ ):
index += 1
snake_case_ = ["\n".join(lines[:index] )]
else:
snake_case_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case_ = [lines[index]]
index += 1
while index < len(lowerCAmelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(lowerCAmelCase_ ) )
if index < len(lowerCAmelCase_ ) - 1:
snake_case_ = [lines[index + 1]]
index += 1
else:
snake_case_ = []
else:
blocks.append("\n".join(lowerCAmelCase_ ) )
snake_case_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase_ ) > 0:
blocks.append("\n".join(lowerCAmelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase_ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a `key` function so comparisons ignore case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of `file`; with `check_only=True`, just report whether it needs sorting."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the package root."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
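
# --- Hedged illustration (editor's addition) ---
# `sort_objects` implements the isort-style ordering used above: constants first,
# then classes, then functions, each group alphabetised while ignoring case and
# underscores. The names below are made up for the demonstration.
def _sort_objects_demo():
    names = ["load_weights", "MyModel", "MY_CONSTANT", "_helper", "Config"]
    return sort_objects(names)
    # -> ["MY_CONSTANT", "Config", "MyModel", "_helper", "load_weights"]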
| 159 |
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return a decimal number in its simplest fraction form."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: reduce numerator/denominator by their gcd.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
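
# --- Hedged sanity check (editor's addition) ---
# The stdlib reaches the same reduced form via fractions.Fraction, which can be
# used to cross-check the hand-rolled Euclidean reduction above:
def _fraction_cross_check():
    from fractions import Fraction

    assert decimal_to_fraction(0.75) == (3, 4)
    assert Fraction(75, 100) == Fraction(3, 4)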
| 159 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transforms a snake_case string to camelCase (or PascalCase if `use_pascal` is set)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
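
# --- Worked examples (editor's addition) ---
def _case_conversion_demo():
    assert snake_to_camel_case("some_random_string") == "someRandomString"
    assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"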
| 357 |
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes found by value
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # swap the stored payloads; the node links stay untouched
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
snake_case : Optional[int] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
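
# --- Design note with a hedged sketch (editor's addition) ---
# swap_nodes above swaps the *data* fields, which is simple but moves payloads
# rather than nodes. For heavy payloads one would relink the nodes instead; an
# illustrative pointer-based variant (helper names are the editor's own):
def _swap_by_relinking(ll: "LinkedList", a, b):
    prev_a = prev_b = None
    node_a = node_b = ll.head
    while node_a is not None and node_a.data != a:
        prev_a, node_a = node_a, node_a.next
    while node_b is not None and node_b.data != b:
        prev_b, node_b = node_b, node_b.next
    if node_a is None or node_b is None or node_a is node_b:
        return
    # re-point the predecessors (or the head) at the swapped nodes
    if prev_a is None:
        ll.head = node_b
    else:
        prev_a.next = node_b
    if prev_b is None:
        ll.head = node_a
    else:
        prev_b.next = node_a
    # finally swap the outgoing links
    node_a.next, node_b.next = node_b.next, node_a.next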
| 41 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """RemBERT tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
@property
def A ( self : Union[str, Any] ) -> Optional[int]:
return len(self.sp_model )
def A ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self.__dict__.copy()
UpperCAmelCase : Dict = None
return state
def __setstate__( self : Any , __snake_case : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : List[str] = d
UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def A ( self : Tuple , __snake_case : Any , __snake_case : Union[str, Any]=False ) -> Union[str, Any]:
UpperCAmelCase : int = self.sp_model.EncodeAsPieces(__snake_case )
return pieces
def A ( self : str , __snake_case : Union[str, Any] ) -> int:
return self.sp_model.PieceToId(__snake_case )
def A ( self : str , __snake_case : str ) -> List[Any]:
return self.sp_model.IdToPiece(__snake_case )
def A ( self : int , __snake_case : Any ) -> List[Any]:
UpperCAmelCase : Optional[int] = self.sp_model.decode_pieces(__snake_case )
return out_string
def A ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : List[Any] = [self.sep_token_id]
UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1]
def A ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : int , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__snake_case ) )
return
UpperCAmelCase : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
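
# --- Hedged usage sketch (editor's addition; downloads the checkpoint from the Hub) ---
def _rembert_tokenizer_demo():
    tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
    encoded = tokenizer("Hello world")
    return encoded["input_ids"]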
| 23 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
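
# --- Hedged usage sketch (editor's addition; sizes are illustrative, not a real checkpoint) ---
# Constructing the config directly and inspecting the ONNX input spec:
def _distilbert_config_demo():
    config = DistilBertConfig(n_layers=4, n_heads=8, dim=512, hidden_dim=4 * 512)
    onnx_config = DistilBertOnnxConfig(config, task="sequence-classification")
    return config.num_hidden_layers, onnx_config.inputs  # attribute_map aliases num_hidden_layers to n_layers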
| 344 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 10_24,
"gpt2-medium": 10_24,
"gpt2-large": 10_24,
"gpt2-xl": 10_24,
"distilgpt2": 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer  # name matches the (mangled) import at the top of this file
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__=False , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
__lowerCamelCase = kwargs.pop('add_bos_token' , lowerCamelCase__ )
__lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space:
__lowerCamelCase = getattr(lowerCamelCase__ , pre_tok_state.pop('type' ) )
__lowerCamelCase = add_prefix_space
__lowerCamelCase = pre_tok_class(**lowerCamelCase__ )
__lowerCamelCase = add_prefix_space
def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> BatchEncoding:
'''simple docstring'''
__lowerCamelCase = kwargs.get('is_split_into_words' , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> BatchEncoding:
'''simple docstring'''
__lowerCamelCase = kwargs.get('is_split_into_words' , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
__lowerCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[int]:
'''simple docstring'''
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) + [self.eos_token_id] )
if len(lowerCamelCase__ ) > self.model_max_length:
__lowerCamelCase = input_ids[-self.model_max_length :]
return input_ids
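
# --- Hedged illustration (editor's addition; downloads the "gpt2" checkpoint) ---
# `add_prefix_space` controls whether a leading space is injected so the first
# word is tokenized like a mid-sentence word (GPT-2's BPE is whitespace-aware):
def _prefix_space_demo():
    tok_default = GPT2TokenizerFast.from_pretrained("gpt2")
    tok_prefixed = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
    return tok_default("hello")["input_ids"], tok_prefixed("hello")["input_ids"]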
| 348 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=14 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=0.02 , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(lowerCamelCase__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , lowerCamelCase__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase__ , position_ids=lowerCamelCase__ , )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self) -> None:
        self.model_tester = FlaxGPTJModelTester(self)
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@tooslow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = False
__lowerCamelCase = model.config.eos_token_id
__lowerCamelCase = jax.jit(model.generate )
__lowerCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
__lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = pt_model_class(lowerCamelCase__ ).eval()
__lowerCamelCase = model_class(lowerCamelCase__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['input_ids'].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
__lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
__lowerCamelCase = pt_model_class.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 348 | 1 |
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip out every power of that prime factor
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'{solution() = }')
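
# --- Worked example (editor's addition) ---
# For n = 13195 the prime factorisation is 5 * 7 * 13 * 29, so the answer is 29.
def _largest_prime_factor_demo():
    assert solution(13195) == 29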
| 49 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 49 | 1 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: a plain Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
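
    # Hedged example (added; not part of the original file): integrate
    # dy/dx = y from x = 0 to 1 and compare with the exact solution e**x.
    import math

    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1]
    print(f"Heun approximation: {approx:.5f}, exact: {math.e:.5f}")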
| 295 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
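

# Hedged usage example (added; not part of the original file). The task name
# "text-generation" is the public transformers API for this pipeline; the model
# id "gpt2" is just a common choice, not something this file mandates.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])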
| 295 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 59 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composite numbers that cannot be written as
    the sum of a prime and twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite that cannot be written as the sum
    of a prime and twice a square (Project Euler 46)."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
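
    # Hedged sanity check (added): the accepted answer to Project Euler 46 is 5777.
    assert solution() == 5777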
| 170 | 0 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation for images, using a Segment Anything (SAM) model."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: int = 32,
        crop_n_points_downscale_factor: int = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 178 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
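

# Hedged note (added; not part of the original tests): outside the Spark-specific
# tests above, map_nested can be exercised directly with any picklable function:
#
#     from datasets.utils.py_utils import map_nested
#     map_nested(add_one, {"a": [1, 2], "b": 3})  # -> {"a": [2, 3], "b": 4}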
| 178 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return the list of all primes up to num, via the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Mark multiples of start as composite
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
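
# Hedged usage example (added; not part of the original file):
#
#     >>> prime_sieve(30)
#     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]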
| 349 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a__ : Any = logging.get_logger(__name__)
a__ : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
a__ : List[str] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    """Read a label file, mapping each line number to the first word on that line."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
__UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__UpperCamelCase = getattr(__A ,__A ).shape
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = shape_pointer.shape
# let's reduce dimension
__UpperCamelCase = value[0]
else:
__UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__UpperCamelCase = getattr(__A ,__A )
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
__UpperCamelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCamelCase = """param"""
if weight_type is not None and weight_type != "param":
__UpperCamelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = """.""".join([key, hf_param_name] )
else:
__UpperCamelCase = key
__UpperCamelCase = value if """lm_head""" in full_key else value[0]
a__ : Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase ( __A ,__A ,__A=None ,__A=None ):
'''simple docstring'''
__UpperCamelCase = False
for key, mapped_key in MAPPING.items():
__UpperCamelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(__A )[0].split(""".""" )[-2]
__UpperCamelCase = mapped_key.replace("""*""" ,__A )
if "weight_g" in name:
__UpperCamelCase = """weight_g"""
elif "weight_v" in name:
__UpperCamelCase = """weight_v"""
elif "bias" in name:
__UpperCamelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCamelCase = """weight"""
else:
__UpperCamelCase = None
if hf_dict is not None:
rename_dict(__A ,__A ,__A ,__A ,__A )
else:
set_recursively(__A ,__A ,__A ,__A ,__A )
return is_used
return is_used
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__A ,__A ,__A ,__A ,hf_model.config.feat_extract_norm == """group""" ,)
__UpperCamelCase = True
else:
__UpperCamelCase = load_wavaveca_layer(__A ,__A ,__A )
if not is_used:
unused_weights.append(__A )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowercase ( __A ,__A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = full_name.split("""conv_layers.""" )[-1]
__UpperCamelCase = name.split(""".""" )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__UpperCamelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__A )
@torch.no_grad()
def _lowercase ( __A ,__A ,__A=None ,__A=None ,__A=True ,__A=False ):
'''simple docstring'''
if config_path is not None:
__UpperCamelCase = WavaVecaConfig.from_pretrained(__A )
else:
__UpperCamelCase = WavaVecaConfig()
if is_seq_class:
__UpperCamelCase = read_txt_into_dict(__A )
__UpperCamelCase = idalabel
__UpperCamelCase = WavaVecaForSequenceClassification(__A )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
feature_extractor.save_pretrained(__A )
elif is_finetuned:
if dict_path:
__UpperCamelCase = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.eos_index
__UpperCamelCase = len(target_dict.symbols )
__UpperCamelCase = os.path.join(__A ,"""vocab.json""" )
if not os.path.isdir(__A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__A ) )
return
os.makedirs(__A ,exist_ok=__A )
__UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCamelCase = 0
__UpperCamelCase = 1
with open(__A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(__A ,__A )
__UpperCamelCase = WavaVecaCTCTokenizer(
__A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=__A ,)
__UpperCamelCase = True if config.feat_extract_norm == """layer""" else False
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__A ,return_attention_mask=__A ,)
__UpperCamelCase = WavaVecaProcessor(feature_extractor=__A ,tokenizer=__A )
processor.save_pretrained(__A )
__UpperCamelCase = WavaVecaForCTC(__A )
else:
__UpperCamelCase = WavaVecaForPreTraining(__A )
if is_finetuned or is_seq_class:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__UpperCamelCase = argparse.Namespace(task="""audio_pretraining""" )
__UpperCamelCase = fairseq.tasks.setup_task(__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__A )
__UpperCamelCase = model[0].eval()
recursively_load_weights(__A ,__A ,not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
a__ : Optional[int] = parser.parse_args()
a__ : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
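
# Hedged example invocation (added). The script filename and all paths below are
# placeholders, not values taken from the original file:
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned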
| 349 | 1 |
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 366 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for each possible total, how many dice rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
| 6 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
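
# Hedged note (added): with the _LazyModule registration above, the heavy
# submodules are only imported when an attribute is first accessed, keeping
# `import transformers` fast. For example, the line below would trigger the
# actual import of modeling_speecht5:
#
#     from transformers import SpeechT5ForTextToSpeech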
| 197 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            # Windows consoles do not reliably support ANSI colors
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
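

# Hedged usage sketch (added; not part of the original file). BulletMenu is
# normally driven by `accelerate config`; a direct call would look like this:
if __name__ == "__main__":
    menu = BulletMenu("What type of machine are you using?", ["No distributed training", "multi-GPU", "TPU"])
    print("selected index:", menu.run())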
| 143 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral for which the ratio of
    primes along both diagonals first falls below the given ratio
    (Project Euler 58)."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
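
    # Hedged sanity check (added): the accepted answer to Project Euler 58 is
    # 26241; it stays commented out because trial-division primality is slow here.
    # assert solution() == 26241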
| 362 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 179 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
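
# Hedged example invocation (added); the script filename and paths are placeholders:
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin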
| 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
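
# Hedged note (added): Twitter's standard user_timeline endpoint only serves
# roughly the most recent 3200 tweets per account, so the pagination loop above
# stops there even for accounts with longer histories.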
| 97 | 1 |
from __future__ import annotations
import math
import random
from typing import Any
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = 0
def __UpperCAmelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
return self.head == self.tail
def __UpperCAmelCase ( self : str , lowercase_ : Any) -> Tuple:
"""simple docstring"""
self.data.append(_lowerCAmelCase)
_UpperCamelCase = self.tail + 1
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCamelCase = self.data[self.head]
_UpperCamelCase = self.head + 1
return ret
def __UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
return self.tail - self.head
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
print(self.data)
print("**************")
print(self.data[self.head : self.tail])
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : Any) -> int:
"""simple docstring"""
_UpperCamelCase = data
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = 1
def __UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.data
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return self.left
def __UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
return self.right
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return self.height
def __UpperCAmelCase ( self : int , lowercase_ : Any) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = data
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : MyNode | None) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = node
def __UpperCAmelCase ( self : List[Any] , lowercase_ : MyNode | None) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = node
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : int) -> str:
"""simple docstring"""
_UpperCamelCase = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()
def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
print("left rotation node:" , node.get_data() )
_UpperCamelCase = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(_lowerCAmelCase )
_UpperCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowerCAmelCase )
_UpperCamelCase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_lowerCAmelCase )
return ret
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
print("right rotation node:" , node.get_data() )
_UpperCamelCase = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(_lowerCAmelCase )
_UpperCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowerCAmelCase )
_UpperCamelCase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_lowerCAmelCase )
return ret
def lowerCAmelCase__ ( a__ ) ->Any:
'''simple docstring'''
_UpperCamelCase = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_lowerCAmelCase ) )
return right_rotation(_lowerCAmelCase )
def lowerCAmelCase__ ( a__ ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_lowerCAmelCase ) )
return left_rotation(_lowerCAmelCase )
def lowerCAmelCase__ ( a__ , a__ ) ->str:
'''simple docstring'''
if node is None:
return MyNode(_lowerCAmelCase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , _lowerCAmelCase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
_UpperCamelCase = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
_UpperCamelCase = right_rotation(_lowerCAmelCase )
else:
_UpperCamelCase = lr_rotation(_lowerCAmelCase )
else:
node.set_right(insert_node(node.get_right() , _lowerCAmelCase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
_UpperCamelCase = node.get_right()
assert right_child is not None
if data < right_child.get_data():
_UpperCamelCase = rl_rotation(_lowerCAmelCase )
else:
_UpperCamelCase = left_rotation(_lowerCAmelCase )
_UpperCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowerCAmelCase )
return node
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
while True:
_UpperCamelCase = root.get_right()
if right_child is None:
break
_UpperCamelCase = right_child
return root.get_data()
def lowerCAmelCase__ ( a__ ) ->List[Any]:
'''simple docstring'''
while True:
_UpperCamelCase = root.get_left()
if left_child is None:
break
_UpperCamelCase = left_child
return root.get_data()
def lowerCAmelCase__ ( a__ , a__ ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = root.get_left()
_UpperCamelCase = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
_UpperCamelCase = get_left_most(_lowerCAmelCase )
root.set_data(_lowerCAmelCase )
root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase ) )
elif left_child is not None:
_UpperCamelCase = left_child
elif right_child is not None:
_UpperCamelCase = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data" )
return root
else:
root.set_left(del_node(_lowerCAmelCase , _lowerCAmelCase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase ) )
if get_height(_lowerCAmelCase ) - get_height(_lowerCAmelCase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
_UpperCamelCase = left_rotation(_lowerCAmelCase )
else:
_UpperCamelCase = rl_rotation(_lowerCAmelCase )
elif get_height(_lowerCAmelCase ) - get_height(_lowerCAmelCase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
_UpperCamelCase = right_rotation(_lowerCAmelCase )
else:
_UpperCamelCase = lr_rotation(_lowerCAmelCase )
_UpperCamelCase = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(_lowerCAmelCase )
return root


class AVLtree:
    """A self-balancing binary search tree."""

    def __init__(self) -> None:
        self.root = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level-order traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
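

# A quick balance check (a sketch, not part of the original script): after
# inserting n keys, an AVL tree's height stays O(log n) rather than O(n).
def _height_check(n: int = 100) -> None:
    tree = AVLtree()
    for value in range(n):
        tree.insert(value)  # note: insert() prints one line per key
    # An AVL tree with n nodes has height < 1.44 * log2(n + 2), so twice the
    # bit length is a comfortable bound; a degenerate BST would reach height n.
    assert tree.get_height() <= 2 * n.bit_length()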
| 354 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise a PyTorch model from the json config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
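
    # Example invocation (a sketch; the checkpoint and config paths below are
    # placeholders, not files shipped with this script):
    #
    #   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./mobilebert/pytorch_model.bin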
| 63 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert the Megatron-DeepSpeed TP/PP weight names to transformers names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
layer_number -= 3
return f"""h.{layer_number}.""" + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
# Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)
if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
else:
for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
else:
        model = BloomModel(config)
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
else:
for key in tensors.keys():
                        # We average (sum then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save the PyTorch model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
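
    # Example invocation (a sketch; the checkpoint directory below is a
    # placeholder for a local Megatron-DeepSpeed dump):
    #
    #   python convert_bloom_original_checkpoint_to_pytorch.py \
    #       --bloom_checkpoint_path ./bloom-checkpoints \
    #       --pytorch_dump_folder_path ./bloom-hf \
    #       --shard_model \
    #       --pretraining_tp 4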
| 275 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: O(n^2) over all pairs; returns the squared distance."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the strip; each point is compared with at most 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
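

# A quick cross-check harness (a sketch, not part of the original module):
# compare the divide-and-conquer result with the O(n^2) brute force, which
# serves as ground truth, on random inputs.
def _cross_check(trials: int = 5) -> None:
    import random

    for _ in range(trials):
        pts = [(random.uniform(0, 10), random.uniform(0, 10)) for _ in range(20)]
        fast = closest_pair_of_points(pts, len(pts))
        slow = dis_between_closest_pair(pts, len(pts)) ** 0.5
        print(f"divide-and-conquer: {fast:.6f}  brute force: {slow:.6f}")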
| 275 | 1 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Checks whether n is a 9-digit number containing each digit 1-9 exactly once."""
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    """Returns the largest 1-9 pandigital concatenated product."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
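
# Why these multipliers (a sketch of the reasoning, not in the original file):
# concatenating a 4-digit n with 2n gives n * 10**5 + 2 * n = 100002 * n, and
# concatenating a 3-digit n with 2n and 3n (both still 3 digits) gives
# n * 10**6 + 2 * n * 10**3 + 3 * n = 1002003 * n.
# For example, 9327 * 100002 = 932718654, the pandigital 9327 | 18654.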
| 354 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
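

# To run only the fast tests above (a sketch; the path assumes the usual
# diffusers checkout layout):
#
#   pytest tests/pipelines/dit/test_dit.py -k "FastTests"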
| 64 | 0 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 174 |
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yields the Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci number with n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
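
# For example, solution(3) == 12: the 12th Fibonacci number, 144, is the first
# with three digits (a worked check of the indexing above, not original text).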
| 174 | 1 |
import math


def malus_law(initial_intensity: float, angle: float) -> float:
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
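
    # A worked example (a sketch, not in the original module): at 60 degrees,
    # cos(60°) ** 2 = 0.25, so a beam of intensity 100.0 transmits 25.0.
    assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9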
| 321 |
"""
A convolution neural network implemented with numpy only:
conv2d layer -> pooling layer -> fully connected BP network.
"""
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: unit number of the flatten layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save all model parameters as a dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read a saved model back into a CNN instance
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, saved as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expand each data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expand three-dimensional data to a one-dimensional array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a matrix to a one-dimensional array
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """
        Calculate the gradient from the data slices of the pooling layer.
        pd_pool: list of matrices
        out_map: the shape of the data slice (size_map * size_map)
        return: pd_all: list of matrices, [num, size_map, size_map]
        """
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )

                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the summed error over all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
            # print(' ----Teach      ', data_teach)
            # print(' ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of the image after convolution so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
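

# A minimal smoke test (a sketch, not in the original module): with a 3x3
# kernel, 2 feature maps, stride 1 and 2x2 pooling, a 12x12 input flattens to
# 2 * (5 * 5) = 50 units, hence bp_num1 = 50 below.
def _smoke_test() -> None:
    cnn = CNN([3, 2, 1], 2, 50, 20, 4)
    data_train = [np.random.rand(12, 12) for _ in range(3)]
    data_teach = [np.random.rand(4) for _ in range(3)]
    cnn.train(len(data_train), data_train, data_teach, n_repeat=2, error_accuracy=0.1, draw_e=False)
    print(cnn.predict(data_train))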
| 321 | 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved print problem."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 26 |
"""Convert Audio Spectrogram Transformer checkpoints from the original repository. URL: https://github.com/YuanGongND/ast"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the model's weights to our Audio Spectrogram Transformer structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
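
    # Example invocation (a sketch; the dump folder below is a placeholder):
    #
    #   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted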
| 113 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
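

# Typical use of this builder goes through `datasets.load_dataset` (a usage
# sketch; "my_data.csv" is a placeholder file name):
#
#   from datasets import load_dataset
#
#   dset = load_dataset("csv", data_files="my_data.csv", split="train")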
| 234 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date, using the Doomsday algorithm."""
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
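
# A worked example (a sketch, not in the original module): for 2000-01-01,
# century = 20 so century_anchor = (5 * 0 + 2) % 7 = 2 and centurian = 0, hence
# dooms_day = 2; 2000 is a leap year (divisible by 400), so the January anchor
# is DOOMSDAY_LEAP[0] = 4 and week_day = (2 + 1 - 4) % 7 = 6 -> "Saturday".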
| 234 | 1 |
def perfect(number: int) -> bool:
    """Returns True if ``number`` equals the sum of its proper divisors (e.g. 6, 28)."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 23 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        embs_pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs_pooled), embs
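

# A minimal usage sketch (not in the original file; the hub id below is the
# commonly used M-CLIP checkpoint and is assumed here, as is the matching
# XLM-R tokenizer):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#   text_features, token_embs = model(batch["input_ids"], batch["attention_mask"])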
| 67 | 0 |
from manim import *


class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2), Write(blue_text))

        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 361 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
a ="""src/transformers"""
a ="""docs/source/en"""
a ="""."""
def _find_text_in_file ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
with open(lowerCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Any = f.readlines()
# Find the start prompt.
__lowerCamelCase : List[str] = 0
while not lines[start_index].startswith(lowerCamelCase__ ):
start_index += 1
start_index += 1
__lowerCamelCase : int = start_index
while not lines[end_index].startswith(lowerCamelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
a ="""Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
a =re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
a =re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
a =re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
a =direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split ( lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : int = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCamelCase__ )
return [m.group(0 ) for m in matches]
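# Illustration only (the example name is arbitrary): the regex splits camel-cased names
# on lowercase->uppercase boundaries, e.g.
#   camel_case_split("SegformerForImageClassification") -> ["Segformer", "For", "Image", "Classification"]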
def _center_text ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
__lowerCamelCase : int = 2 if text == '✅' or text == '❌' else len(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = (width - text_length) // 2
__lowerCamelCase : List[Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules ( ) -> str:
__lowerCamelCase : Union[str, Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__lowerCamelCase : List[str] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__lowerCamelCase : Dict = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__lowerCamelCase : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
__lowerCamelCase : List[str] = collections.defaultdict(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCamelCase__ ):
__lowerCamelCase : List[Any] = None
if attr_name.endswith('Tokenizer' ):
__lowerCamelCase : Dict = slow_tokenizers
__lowerCamelCase : List[Any] = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
__lowerCamelCase : Union[str, Any] = fast_tokenizers
__lowerCamelCase : str = attr_name[:-1_3]
elif _re_tf_models.match(lowerCamelCase__ ) is not None:
__lowerCamelCase : List[str] = tf_models
__lowerCamelCase : Optional[int] = _re_tf_models.match(lowerCamelCase__ ).groups()[0]
elif _re_flax_models.match(lowerCamelCase__ ) is not None:
__lowerCamelCase : List[Any] = flax_models
__lowerCamelCase : Optional[Any] = _re_flax_models.match(lowerCamelCase__ ).groups()[0]
elif _re_pt_models.match(lowerCamelCase__ ) is not None:
__lowerCamelCase : Optional[int] = pt_models
__lowerCamelCase : Any = _re_pt_models.match(lowerCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(lowerCamelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
__lowerCamelCase : List[Any] = True
break
# Try again after removing the last word in the name
__lowerCamelCase : str = ''.join(camel_case_split(lowerCamelCase__ )[:-1] )
# Let's build that table!
__lowerCamelCase : str = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__lowerCamelCase : Union[str, Any] = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
__lowerCamelCase : List[Any] = [len(lowerCamelCase__ ) + 2 for c in columns]
__lowerCamelCase : int = max([len(lowerCamelCase__ ) for name in model_names] ) + 2
# Build the table per se
__lowerCamelCase : Union[str, Any] = '|' + '|'.join([_center_text(lowerCamelCase__ , lowerCamelCase__ ) for c, w in zip(lowerCamelCase__ , lowerCamelCase__ )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
__lowerCamelCase : List[str] = {True: '✅', False: '❌'}
for name in model_names:
__lowerCamelCase : Optional[int] = model_name_to_prefix[name]
__lowerCamelCase : Any = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCamelCase__ , lowerCamelCase__ ) for l, w in zip(lowerCamelCase__ , lowerCamelCase__ )] ) + "|\n"
return table
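# A generated row looks roughly like this (model name and flags are illustrative):
# |      BERT      |      ✅      |      ✅      |      ✅      |      ✅      |      ✅      |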
def check_model_table ( lowerCamelCase__=False ) -> Any:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
__lowerCamelCase : List[Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
            with open(os.path.join(PATH_TO_DOCS , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args =parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 113 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys ( UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
lowerCAmelCase__ = "segformer.encoder." + key
if key.startswith("backbone" ):
lowerCAmelCase__ = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase__ = key[key.find("patch_embed" ) + len("patch_embed" )]
lowerCAmelCase__ = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(UpperCamelCase_ )-1}" )
if "norm" in key:
lowerCAmelCase__ = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase__ = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
lowerCAmelCase__ = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(UpperCamelCase_ )-1}" )
if "layer_norm1" in key:
lowerCAmelCase__ = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
lowerCAmelCase__ = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase__ = key[key.find("block" ) + len("block" )]
lowerCAmelCase__ = key.replace(F"block{idx}" , F"block.{int(UpperCamelCase_ )-1}" )
if "attn.q" in key:
lowerCAmelCase__ = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
lowerCAmelCase__ = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
lowerCAmelCase__ = key.replace("attn" , "attention.self" )
if "fc1" in key:
lowerCAmelCase__ = key.replace("fc1" , "dense1" )
if "fc2" in key:
lowerCAmelCase__ = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
lowerCAmelCase__ = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
lowerCAmelCase__ = key.replace("linear_fuse.conv" , "linear_fuse" )
lowerCAmelCase__ = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase__ = key[key.find("linear_c" ) + len("linear_c" )]
lowerCAmelCase__ = key.replace(F"linear_c{idx}" , F"linear_c.{int(UpperCamelCase_ )-1}" )
if key.startswith("head" ):
lowerCAmelCase__ = key.replace("head" , "classifier" )
lowerCAmelCase__ = value
return new_state_dict
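# End-to-end illustration of the renaming chain above (the key is a made-up example):
#   "backbone.block1.0.attn.q.weight" -> "segformer.encoder.block.0.0.attention.self.query.weight"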
def read_in_k_v ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase__ = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
lowerCAmelCase__ = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase__ = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase__ = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase__ = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase__ = kv_bias[
config.hidden_sizes[i] :
]
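# Shape sketch for the split above (toy numbers, assuming config.hidden_sizes[i] == 64):
# the fused kv weight has shape (128, 64); rows [0:64] become the key projection and
# rows [64:128] the value projection, and the kv bias is split the same way.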
def prepare_img ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return image
@torch.no_grad()
def convert_segformer_checkpoint ( UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = SegformerConfig()
lowerCAmelCase__ = False
# set attributes based on model_name
lowerCAmelCase__ = "huggingface/label-files"
if "segformer" in model_name:
lowerCAmelCase__ = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
lowerCAmelCase__ = 150
lowerCAmelCase__ = "ade20k-id2label.json"
lowerCAmelCase__ = (1, 150, 128, 128)
elif "city" in model_name:
lowerCAmelCase__ = 19
lowerCAmelCase__ = "cityscapes-id2label.json"
lowerCAmelCase__ = (1, 19, 128, 128)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
lowerCAmelCase__ = True
lowerCAmelCase__ = model_name[4:6]
lowerCAmelCase__ = 1_000
lowerCAmelCase__ = "imagenet-1k-id2label.json"
lowerCAmelCase__ = (1, 1_000)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="dataset" ) , "r" ) )
lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase__ = [64, 128, 320, 512]
lowerCAmelCase__ = 256
elif size == "b2":
lowerCAmelCase__ = [64, 128, 320, 512]
lowerCAmelCase__ = 768
lowerCAmelCase__ = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase__ = [64, 128, 320, 512]
lowerCAmelCase__ = 768
lowerCAmelCase__ = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase__ = [64, 128, 320, 512]
lowerCAmelCase__ = 768
lowerCAmelCase__ = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase__ = [64, 128, 320, 512]
lowerCAmelCase__ = 768
lowerCAmelCase__ = [3, 6, 40, 3]
else:
raise ValueError(F"Size {size} not supported" )
# load image processor (only resize + normalize)
lowerCAmelCase__ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase_ , align=UpperCamelCase_ , do_random_crop=UpperCamelCase_ )
# prepare image
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location=torch.device("cpu" ) )
else:
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
lowerCAmelCase__ = rename_keys(UpperCamelCase_ , encoder_only=UpperCamelCase_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(UpperCamelCase_ , UpperCamelCase_ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase__ = False
lowerCAmelCase__ = SegformerForImageClassification(UpperCamelCase_ )
else:
lowerCAmelCase__ = SegformerForSemanticSegmentation(UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
model.eval()
# forward pass
lowerCAmelCase__ = model(UpperCamelCase_ )
lowerCAmelCase__ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[
[-1.1_3_7_2e0_1, -1.2_7_8_7e0_1, -1.3_4_7_7e0_1],
[-1.2_5_3_6e0_1, -1.4_1_9_4e0_1, -1.4_4_0_9e0_1],
[-1.3_2_1_7e0_1, -1.4_8_8_8e0_1, -1.5_3_2_7e0_1],
],
[
[-1.4_7_9_1e0_1, -1.7_1_2_2e0_1, -1.8_2_7_7e0_1],
[-1.7_1_6_3e0_1, -1.9_1_9_2e0_1, -1.9_5_3_3e0_1],
[-1.7_8_9_7e0_1, -1.9_9_9_1e0_1, -2.0_3_1_5e0_1],
],
[
[7.6_7_2_3e-0_1, 4.1_9_2_1e-0_1, -7.7_8_7_8e-0_2],
[4.7_7_7_2e-0_1, 9.5_5_5_7e-0_3, -2.8_0_8_2e-0_1],
[3.6_0_3_2e-0_1, -2.4_8_2_6e-0_1, -5.1_1_6_8e-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase__ = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
lowerCAmelCase__ = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 340 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                F" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
    print(F"Total reward: {total_reward}")
| 340 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __lowercase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp ( self : Optional[int]):
super().setUp()
SCREAMING_SNAKE_CASE_: List[str] = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
SCREAMING_SNAKE_CASE_: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@tooslow
    def test_pre_tokenization ( self : List[str]):
SCREAMING_SNAKE_CASE_: int = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
SCREAMING_SNAKE_CASE_: Union[str, Any] = "今天天气真好!"
SCREAMING_SNAKE_CASE_: Optional[Any] = ["今天", "天气", "真", "好", "!"]
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.tokenize(a__)
self.assertListEqual(a__ , a__)
SCREAMING_SNAKE_CASE_: Optional[Any] = "今天天气真好!"
SCREAMING_SNAKE_CASE_: List[Any] = [tokenizer.bos_token] + tokens
SCREAMING_SNAKE_CASE_: Dict = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , a__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.decode(a__)
self.assertEqual(a__ , a__)
| 364 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
    model_input_names = ['''pixel_values''']
def __init__( self : List[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : Tuple , ):
super().__init__(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = size if size is not None else {"height": 384, "width": 384}
SCREAMING_SNAKE_CASE_: Union[str, Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = do_resize
SCREAMING_SNAKE_CASE_: Dict = size
SCREAMING_SNAKE_CASE_: int = resample
SCREAMING_SNAKE_CASE_: str = do_rescale
SCREAMING_SNAKE_CASE_: str = rescale_factor
SCREAMING_SNAKE_CASE_: Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_: List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_: Optional[int] = do_convert_rgb
    def resize ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
SCREAMING_SNAKE_CASE_: int = (size["height"], size["width"])
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
    def rescale ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, float] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ):
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
    def normalize ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ):
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
    def preprocess ( self : Union[str, Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ : Dict , ):
SCREAMING_SNAKE_CASE_: Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Optional[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_: str = size if size is not None else self.size
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_: List[Any] = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: List[Any] = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: int = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: Optional[Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: List[str] = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: str = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: List[str] = BatchFeature(data={"pixel_values": images} , tensor_type=lowerCAmelCase__)
return encoded_outputs
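# Minimal usage sketch (not part of the original module; the file name is an assumption):
#   processor = __lowercase()                      # the image processor defined above: 384x384 resize, CLIP mean/std
#   batch = processor(images=PIL.Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape                    # -> (1, 3, 384, 384)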
| 127 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 5_1_2,
}
SPIECE_UNDERLINE = "▁"
class A__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] , _a : str , _a : Optional[int]="<s>" , _a : Any="</s>" , _a : Tuple="</s>" , _a : Tuple="<s>" , _a : str="<unk>" , _a : Optional[Any]="<pad>" , _a : Optional[Any]="<mask>" , _a : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , _a : Optional[Dict[str, Any]] = None , **_a : List[str] , ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_SCREAMING_SNAKE_CASE =vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_SCREAMING_SNAKE_CASE ={'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
_SCREAMING_SNAKE_CASE =len(self.fairseq_tokens_to_ids )
_SCREAMING_SNAKE_CASE =len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_SCREAMING_SNAKE_CASE ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
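    # Resulting format (derived from the code above): single sequence -> "<s> A </s>",
    # pair of sequences -> "<s> A </s></s> B </s>".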
    def get_special_tokens_mask ( self : Optional[Any] , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
    def create_token_type_ids_from_sequences ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
    def vocab_size ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
    def get_vocab ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize ( self : Optional[int] , _a : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_a , out_type=_a )
    def _convert_token_to_id ( self : Optional[int] , _a : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_a ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_a )
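    # Worked example (ids are hypothetical): with the 4 fairseq control tokens above,
    # fairseq_offset == 4, so a sentencepiece piece with id 20 comes back as 20 + 4 = 24,
    # while a piece sentencepiece maps to 0 (unknown) returns unk_token_id instead.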
    def _convert_id_to_token ( self : Tuple , _a : Any ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string ( self : List[Any] , _a : List[Any] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =''
_SCREAMING_SNAKE_CASE =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =[]
else:
current_sub_tokens.append(_a )
_SCREAMING_SNAKE_CASE =False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __getstate__( self : str ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.__dict__.copy()
_SCREAMING_SNAKE_CASE =None
return state
def __setstate__( self : Union[str, Any] , _a : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def save_vocabulary ( self : Any , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE =os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , 'wb' ) as fi:
_SCREAMING_SNAKE_CASE =self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 47 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
"""simple docstring"""
def _lowercase ( __snake_case = 10 ,__snake_case = 1_000 ,__snake_case = True ) -> int:
assert (
isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
and isinstance(__snake_case ,__snake_case )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def get_avg ( __snake_case ,__snake_case ) -> int:
return int((number_a + number_a) / 2 )
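# e.g. get_avg(10, 1000) == 505, the midpoint used by the bisection in guess_the_number below.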
def guess_the_number ( __snake_case ,__snake_case ,__snake_case ) -> None:
assert (
isinstance(__snake_case ,__snake_case ) and isinstance(__snake_case ,__snake_case ) and isinstance(__snake_case ,__snake_case )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(__snake_case ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
__lowerCAmelCase : List[Any] = lower
__lowerCAmelCase : List[Any] = higher
__lowerCAmelCase : int = []
while True:
__lowerCAmelCase : Optional[int] = get_avg(__snake_case ,__snake_case )
last_numbers.append(__snake_case )
if answer(__snake_case ) == "low":
__lowerCAmelCase : int = number
elif answer(__snake_case ) == "high":
__lowerCAmelCase : Optional[int] = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def main ( ) -> None:
__lowerCAmelCase : Tuple = int(input("Enter lower value : " ).strip() )
__lowerCAmelCase : Dict = int(input("Enter high value : " ).strip() )
__lowerCAmelCase : Tuple = int(input("Enter value to guess : " ).strip() )
guess_the_number(__snake_case ,__snake_case ,__snake_case )
if __name__ == "__main__":
main()
| 58 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test ( self: Dict) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("google/mt5-small")
__lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="pt").input_ids
__lowerCAmelCase : List[str] = tokenizer("Hi I am" , return_tensors="pt").input_ids
__lowerCAmelCase : List[str] = model(input_ids.to(_SCREAMING_SNAKE_CASE) , labels=labels.to(_SCREAMING_SNAKE_CASE)).loss
__lowerCAmelCase : Optional[int] = -(labels.shape[-1] * loss.item())
__lowerCAmelCase : List[str] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 58 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger =logging.get_logger(__name__)
def get_deta_config ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
__SCREAMING_SNAKE_CASE : Tuple = DetaConfig(
backbone_config=lowercase__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=lowercase__ , with_box_refine=lowercase__ , two_stage=lowercase__ , )
# set labels
__SCREAMING_SNAKE_CASE : Tuple = '''huggingface/label-files'''
if "o365" in model_name:
__SCREAMING_SNAKE_CASE : Dict = 366
__SCREAMING_SNAKE_CASE : Any = '''object365-id2label.json'''
else:
__SCREAMING_SNAKE_CASE : List[str] = 91
__SCREAMING_SNAKE_CASE : List[Any] = '''coco-detection-id2label.json'''
__SCREAMING_SNAKE_CASE : List[str] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type='''dataset''' ) ) , '''r''' ) )
__SCREAMING_SNAKE_CASE : Dict = {int(lowercase__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Optional[Any] = idalabel
__SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def create_rename_keys ( lowercase__ ):
__SCREAMING_SNAKE_CASE : int = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = dct.pop(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = val
def read_in_swin_q_k_v ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
__SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__SCREAMING_SNAKE_CASE : Any = in_proj_weight[:dim, :]
__SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[: dim]
__SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : str = in_proj_bias[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[
-dim :, :
]
__SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[-dim :]
# fmt: on
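# Shape sketch for the fused qkv split above (numbers follow the Swin-L config from
# get_deta_config, embed_dim == 192): in the first stage the qkv weight has shape
# (576, 192); slices [0:192], [192:384] and [384:576] become the query, key and value
# projections respectively.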
def read_in_decoder_q_k_v ( lowercase__ , lowercase__ ):
# transformer decoder self-attention layers
__SCREAMING_SNAKE_CASE : Any = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
__SCREAMING_SNAKE_CASE : Any = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__SCREAMING_SNAKE_CASE : Any = in_proj_weight[:hidden_size, :]
__SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[:hidden_size]
__SCREAMING_SNAKE_CASE : str = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
__SCREAMING_SNAKE_CASE : int = in_proj_weight[-hidden_size:, :]
__SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-hidden_size:]
def prepare_img ( ):
__SCREAMING_SNAKE_CASE : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__SCREAMING_SNAKE_CASE : Optional[int] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def convert_deta_checkpoint ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = get_deta_config(lowercase__ )
# load original state dict
if model_name == "deta-swin-large":
__SCREAMING_SNAKE_CASE : Union[str, Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
__SCREAMING_SNAKE_CASE : Optional[Any] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
__SCREAMING_SNAKE_CASE : Dict = torch.load(lowercase__ , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(lowercase__ , param.shape )
# rename keys
__SCREAMING_SNAKE_CASE : Optional[int] = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_swin_q_k_v(lowercase__ , config.backbone_config )
read_in_decoder_q_k_v(lowercase__ , lowercase__ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = val
if "input_proj" in key:
__SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(lowercase__ )
__SCREAMING_SNAKE_CASE : str = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
__SCREAMING_SNAKE_CASE : List[str] = DetaForObjectDetection(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(lowercase__ )
# load image processor
__SCREAMING_SNAKE_CASE : Union[str, Any] = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
__SCREAMING_SNAKE_CASE : Tuple = prepare_img()
__SCREAMING_SNAKE_CASE : Optional[Any] = processor(images=lowercase__ , return_tensors='''pt''' )
__SCREAMING_SNAKE_CASE : Optional[int] = encoding['''pixel_values''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(pixel_values.to(lowercase__ ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowercase__ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowercase__ ) , atol=1e-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
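# Example invocation (the script filename and output path are illustrative):
#
#     python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large --push_to_hub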
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase : List[str] =parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase : List[str] = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
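# For example (illustrative): output_types(["Hello", torch.ones(3000)]) returns
# ["text", "audio"], since a plain string maps to "text" and a raw 1-D tensor
# is treated as an audio waveform.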
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_checks(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 93 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
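# Quick sanity sketch (illustrative): a vertical blinker flips to horizontal
# after one generation, and back again after two.
#
#     assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
#     assert new_generation(new_generation(BLINKER)) == BLINKER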
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 364 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters in sorted order, e.g. 'test' -> 'estt'."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list sharing the given word's signature."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
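# For example (illustrative, assuming both words appear in words.txt):
#     signature("pots") == "opst" == signature("stop")
# so "pots" and "stop" land in the same `word_by_signature` bucket and are
# reported as anagrams of each other.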
| 238 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
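        # Worked example (illustrative): with the defaults seq_length=7 and
        # attention_window=4, encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8,
        # i.e. the input is padded up to the next multiple of the window.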
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : List[str] = tf.zeros_like(inputs_dict['attention_mask'] )
UpperCamelCase : List[str] = 2
UpperCamelCase : List[Any] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices, 1, inputs_dict['global_attention_mask'], )
UpperCamelCase : List[str] = True
UpperCamelCase : List[str] = self.model_tester.seq_length
UpperCamelCase : int = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs):
UpperCamelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], )
        def check_encoder_attentions_output(outputs):
UpperCamelCase : str = [t.numpy() for t in outputs.encoder_attentions]
UpperCamelCase : Dict = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), self.model_tester.num_hidden_layers )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_length, seq_length], )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )
for model_class in self.all_model_classes:
UpperCamelCase : List[str] = True
UpperCamelCase : Dict = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Any = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(config.output_hidden_states, SCREAMING_SNAKE_CASE_ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE_ )
if self.is_encoder_decoder:
UpperCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = model(self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(config.output_hidden_states, SCREAMING_SNAKE_CASE_ )
check_decoder_attentions_output(SCREAMING_SNAKE_CASE_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(config.output_hidden_states, SCREAMING_SNAKE_CASE_ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
UpperCamelCase : List[Any] = True
UpperCamelCase : List[Any] = True
UpperCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = model(self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(model.config.output_hidden_states, SCREAMING_SNAKE_CASE_ )
check_encoder_attentions_output(SCREAMING_SNAKE_CASE_ )
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 119 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
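# Quick sanity sketch (illustrative, not part of the library code): the cosine
# schedule yields tiny betas near t=0 that grow toward the `max_beta` cap.
#
#     betas = betas_for_alpha_bar(1000)
#     betas[0]   # ~4e-5
#     betas[-1]  # 0.999, i.e. clipped at max_beta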
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
@property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" correspond to the annotation in Table 2 of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
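    # Worked example (illustrative): with num_train_timesteps=1000 and
    # num_inference_steps=10, the spacing modes above produce
    #   "linspace" -> [999, 888, ..., 111, 0]
    #   "leading"  -> [900, 800, ..., 100, 0] (plus `steps_offset`)
    #   "trailing" -> [999, 899, ..., 199, 99]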
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
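    # Worked example (illustrative): with log_sigmas = [0.0, 1.0, 2.0] and a
    # sigma of e**1.5, log_sigma = 1.5 falls halfway between indices 1 and 2,
    # so w = 0.5 and the returned fractional timestep is 1.5.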
@property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ) -> Optional[int]:
return self.config.num_train_timesteps
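# Minimal usage sketch (illustrative only; `unet` stands in for any trained
# denoising model with a `(sample, timestep) -> noise` interface):
#
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample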
| 119 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = 2_0
UpperCAmelCase_ : int = model_class_name(lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = model.encode(inputs_dict['input_ids'] )
UpperCAmelCase_ , UpperCAmelCase_ : str = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCAmelCase_ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCAmelCase_ : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
UpperCAmelCase_ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCAmelCase_ : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase__ , )
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = 2_0
UpperCAmelCase_ : Any = model_class_name(lowerCAmelCase__ )
UpperCAmelCase_ : int = model.encode(inputs_dict['input_ids'] )
UpperCAmelCase_ , UpperCAmelCase_ : Any = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCAmelCase_ : int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ : int = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
UpperCAmelCase_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCAmelCase_ : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCAmelCase__ , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ )
UpperCAmelCase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
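    # Worked example (illustrative): shift_tokens_right pushes every token one
    # position to the right and writes the decoder start token (here 2) at
    # position 0, so
    #     [[71, 82, 18, 33, 2, 1, 1]]  ->  [[2, 71, 82, 18, 33, 2, 1]]
    # The trailing pad token (1) falls off the end, hence n_pad_after equals
    # n_pad_before - 1.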
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 366 |
def ugly_numbers(n: int) -> int:
    """
    Returns the nth ugly number, i.e. the nth positive integer whose only
    prime factors are 2, 3 and 5. The sequence starts 1, 2, 3, 4, 5, 6, 8, ...

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'{ugly_numbers(200) = }')
| 145 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: one pass per call, recursing until a pass makes
    no swaps.

    >>> bubble_sort([3, 1, 2])
    [1, 2, 3]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
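# Design note: each recursive call performs one full bubble pass and recurses
# only if a swap occurred, so already-sorted input returns after a single
# O(n) pass. The recursion depth is O(n) in the worst case, so very large
# lists can hit Python's default recursion limit (see sys.setrecursionlimit).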
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
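# Minimal usage sketch (illustrative only):
#
#     config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)
#     config.output_stride  # -> 32 (the default)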
| 283 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
    "The `image_to_image.py` script is outdated. Please import"
    " `StableDiffusionImg2ImgPipeline` from `diffusers` directly instead."
)
| 356 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # a single random channel-first image, converted to a PIL image
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 66 | 0 |
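Outside the test fixtures above, the same processor can be used directly. A minimal sketch, assuming the public kakaobrain/align-base checkpoint on the Hub; the input string is just an arbitrary example:

from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")

# One call wraps both components: the tokenizer handles `text`,
# the EfficientNet image processor handles `images`.
inputs = processor(text="lower newer", return_tensors="pt")
print(list(inputs.keys()))  # e.g. ['input_ids', 'token_type_ids', 'attention_mask']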
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
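# Worked example (illustrative, not part of the metric): with one prediction and
# one reference list, compute_em scores 100.0 when the normalized strings match,
#   compute_em(["About 95 species are currently accepted ."],
#              [["About 95 species are currently accepted."]])  # -> 100.0
# because normalize_answer strips punctuation, articles and extra whitespace
# before comparing.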
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
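# Illustrative check (values taken from the module docstring example above):
#   SARIsent(normalize("About 95 species are currently accepted ."),
#            normalize("About 95 you now get in ."),
#            [normalize("About 95 species are currently known .")])
# averages the keep/deletion/addition F-scores over 1- to 4-grams and yields
# roughly 0.218, i.e. a SARI of ~21.8 after the *100 scaling in compute_sari.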
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 265 |
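The metric can be driven end to end exactly as in its docstring example. A minimal sketch, assuming the datasets library is installed:

import datasets

sources = ["About 95 species are currently accepted ."]
predictions = ["About 95 you now get in ."]
references = [["About 95 species are currently known ."]]

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
print(results)  # {'sari': 21.805..., 'sacrebleu': 14.535..., 'exact': 0.0}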
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 52 | 0 |
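A quick programmatic illustration of the keyword cipher above (the keyword "Goodbye!!" is an arbitrary example; non-alphabetic characters pass through unchanged):

cipher_map = create_cipher_map("Goodbye!!")
secret = encipher("Hello World!!", cipher_map)  # -> 'CYJJM VMQJB!!'
plain = decipher(secret, cipher_map)            # -> 'HELLO WORLD!!'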
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
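        # Worked example with the defaults above (illustrative): num_mel_bins=16,
        # max_length=24, patch_size=2 and both strides 2 give
        #   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
        #   time_out_dimension      = (24 - 2) // 2 + 1 = 12
        # so num_patches = 8 * 12 = 96 and seq_length = 98.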
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_values, labels) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 354 |
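Outside the test harness, the same checkpoint can classify any mono waveform. A minimal sketch, assuming torchaudio can decode a local "clip.wav" (a hypothetical file) recorded at the sampling rate the extractor expects:

import torch
import torchaudio
from transformers import ASTFeatureExtractor, ASTForAudioClassification

feature_extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")

waveform, sampling_rate = torchaudio.load("clip.wav")  # hypothetical local file
inputs = feature_extractor(waveform.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 527): one score per AudioSet class
predicted_label = model.config.id2label[logits.argmax(-1).item()]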
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )

        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91 | 0 |
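A quick illustration of distribute_coins (the three-node tree is an arbitrary example): with all 3 coins on the root and none on the children, one coin must be pushed down each edge, so 2 moves are needed.

# root holds 3 coins, both children hold 0
tree = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(tree))  # 2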