import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # out-of-vocabulary pieces ("9" and "é" here) map to id 0, i.e. <unk>
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ ={"input_ids": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
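

# Quick usage sketch (added for illustration; mirrors what the slow tests above assert):
#
#     from transformers import BertGenerationTokenizer
#     tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#     tok.encode("Hello World!")  # -> [18536, 2260, 101]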
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected (height, width) after resizing, mirroring the
        processor's shortest-edge resize logic.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # 30 ones at the end of row 0
        fake_binary_mask[1, :15] = 1  # 15 ones at the start of row 1
        fake_binary_mask[5, :10] = 1  # 10 ones at the start of row 5

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)  # two runs of ones -> two (start, length) pairs
        # the first run starts at flattened position 21 (1-indexed) and spans the 30 ones
        # of row 0 plus the 15 contiguous ones of row 1, i.e. length 45
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
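

# Post-processing usage sketch (added for illustration; the checkpoint name is an example):
#
#     processor = OneFormerImageProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
#     semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])[0]
#     # -> a (512, 512) tensor of per-pixel class ids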
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class BifidCipher:
    """Bifid cipher over the 5x5 Polybius square above ("j" is merged into "i")."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) coordinates of ``letter`` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-indexed (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # step 1: write the row indices over the column indices, one column per letter
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # step 2: read the flattened grid in consecutive pairs and map back to letters
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # reverse step 2: lay the ciphertext coordinates out pairwise in a flat array
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # reverse step 1: reshape so row/column indices line up per plaintext letter
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
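

if __name__ == "__main__":
    # Minimal round-trip check (added sketch, not part of the upstream module):
    # encode() normalizes the plaintext (lowercase, spaces stripped, "j" -> "i"),
    # so decoding the ciphertext returns that normalized form.
    cipher = BifidCipher()
    ciphertext = cipher.encode("test message")
    assert cipher.decode(ciphertext) == "testmessage"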
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
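        # e.g. with the defaults above: num_patches = (30 // 2) ** 2 = 225 and
        # seq_length = ceil((1 - 0.6) * 226) = 91 visible tokens after masking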
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)
    @unittest.skip(
        reason=(
            "ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
            " to get deterministic results."
        )
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the matching ``torch.nn`` module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
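

# Usage sketch (added for illustration):
#
#     import torch
#     act = get_activation("silu")
#     y = act(torch.randn(4))   # elementwise SiLU
#     get_activation("tanh")    # raises ValueError: Unsupported activation function: tanh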
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
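

# Shape note (added for illustration): Pix2Struct prepends a row index and a column
# index to every flattened patch, so with the default 16x16 patches on RGB input the
# hidden dim checked above is 16 * 16 * 3 + 2 = 770.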
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
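
# Behavior note (added for illustration): nothing from `modeling_nllb_moe` is imported
# until an attribute of this module is first accessed, e.g.
#
#     from transformers.models.nllb_moe import NllbMoeConfig  # resolved lazily by _LazyModule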
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class BatchSamplerShardsTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
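    # Example of the contract checked below: with 2 processes and batch size 3 over
    # range(24), BatchSamplerShard deals whole batches out round-robin:
    #   shard 0 -> [0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]
    #   shard 1 -> [3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]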
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        # The original drop_last/split_batches values were garbled; all four combinations are exercised here.
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 401 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
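# enable_full_determinism() forces deterministic CUDA/cuDNN kernels (and disables TF32)
# so the audio slices asserted below are reproducible across runs.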
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # The original field names were garbled; these two False flags most plausibly
    # disable the attention-slicing and CPU-offload common tests for this pipeline.
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 530 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
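# SchedulerCommonTest provides check_over_configs / check_over_forward, which re-run the
# scheduler step with one config (or forward argument) changed and compare outputs
# against the defaults.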
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
    def test_trained_betas(self):
        # Intentionally a no-op override (method names reconstructed; the original file
        # overrode two common-test hooks that do not apply to UnCLIP).
        pass

    def test_add_noise_device(self):
        pass | 626 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus --tpu_num_cores.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
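# Typical invocation (script name is illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...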
| 424 |
'''simple docstring'''
# Molecular-chemistry helpers; parameter names are reconstructed from each formula
# (R = 0.0821 L·atm/(mol·K) in the ideal gas law), since the original signatures were garbled.
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law: P = nRT / V (pressure in atm, temperature in K, volume in L)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
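# Worked example: 2 mol of an ideal gas at 300 K in a 5 L vessel ->
#   moles_to_pressure(5, 2, 300) == round((2 * 0.0821 * 300) / 5) == round(9.852) == 10 atm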
if __name__ == "__main__":
import doctest
doctest.testmod()
| 424 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
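# TFBertTokenizer runs tokenization inside the TensorFlow graph (via tensorflow-text), so it
# can be wrapped in tf.function and exported in a SavedModel; that is what the tests below exercise.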
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    # Test method names below are reconstructed; the original identifiers were garbled.

    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)

            loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 20 |
'''simple docstring'''
def solution(limit: int = 50_000_000) -> int:
    """
    Project Euler 87: count the numbers below `limit` expressible as the sum of a
    prime square, a prime cube and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        # Sieve of Eratosthenes: strike out multiples of p (the step must be p, not the limit).
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
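# The problem statement's smallest example: 28 = 2**2 + 2**3 + 2**4.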
if __name__ == "__main__":
print(f'''{solution() = }''')
| 267 | 0 |
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
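# TFAdaptiveSoftmaxMask implements Transformer-XL's adaptive softmax: frequent tokens live
# in a small "head" cluster, rare tokens in progressively lower-dimensional tail clusters,
# which keeps the output projection cheap for large vocabularies.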
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
| 713 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    # The MoVQ decoder upsamples by scale_factor, so the requested size is rounded up
    # to the nearest resolution that maps to a whole number of latent pixels.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Pipeline for image generation with Kandinsky 2.2 conditioned on ControlNet-style hints."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=False)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 422 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
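# Each [parlai_name, hf_name] pair above is applied left-to-right by rename_state_dict_key
# to translate ParlAI parameter names into HF Blenderbot names.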
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF Blenderbot structure.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 414 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
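# The numeric suffix in each schedule name is the number of denoising steps it encodes
# (e.g. fast27_timesteps is a fast 27-step schedule); the digits were garbled to letters
# in the original and are restored here.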
@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the DeepFloyd IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 414 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
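# Note: make_linear_from_emb ties the LM head weights to the shared embedding matrix,
# mirroring fairseq's weight tying for fine-tuned checkpoints.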
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 706 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
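# This module backs the `diffusers-cli env` subcommand, which prints the version info
# users paste into bug reports.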
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self) -> dict:
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 553 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
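

# Shape summary for the batch returned above (2 examples, 25 tokens each):
# input_ids / attention_mask / token_type_ids / labels -> (2, 25); bbox -> (2, 25, 4).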
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
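

# To run only these slow integration checks (illustrative command; the test-file
# path is an assumption about the repository layout):
#   RUN_SLOW=1 python -m pytest tests/models/layoutlm/test_modeling_tf_layoutlm.py -k "IntegrationTest"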
| 193 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
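

# Usage sketch (illustrative; assumes this module is registered as the packaged
# "csv" builder, as in the `datasets` library). Extra keyword arguments such as
# `sep` flow into CsvConfig and from there into pandas.read_csv through
# `pd_read_csv_kwargs`:
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")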
| 193 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 720 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
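

# Illustrative only (added): a dataclass field declared with the helper above.
# The field name and values are invented for this example.
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = HfArg(default=5e-5, aliases=["--lr"], help="Peak learning rate.")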
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
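

# Minimal runnable demo (added for illustration; not part of the original module):
if __name__ == "__main__":

    @dataclasses.dataclass
    class _DemoArgs:
        name: str = "world"
        times: int = 1

    (demo_args,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(args=["--name", "hf", "--times", "3"])
    print(demo_args)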
| 62 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest number in lst (1-indexed), using quickselect.
    Assumes the elements of lst are distinct.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (kth element)
    # + big (elements larger than the pivot)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
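    # Small demonstration (added for illustration): the 3rd smallest element of
    # [2, 1, 3, 4, 5] is 3.
    print(kth_number([2, 1, 3, 4, 5], 3))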
| 580 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
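
# Sanity check (added for illustration): for a string of thirteen 1s the only
# 13-digit window is all ones, so the largest product is 1:
#   solution("1111111111111") == 1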
| 580 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
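

# Usage sketch outside the test harness (illustrative; mirrors the slow test above):
#   processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
#   model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   pred = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[pred])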
| 473 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of func (given as a string in x) starting from the point a, by the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e (the root of log(x) - 1 = 0)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 473 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
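
    # Quick demonstration (added for illustration): a valid BST and an invalid one.
    print(is_binary_search_tree(TreeNode(6, TreeNode(2), TreeNode(7))))  # True
    print(is_binary_search_tree(TreeNode(6, TreeNode(7), TreeNode(2))))  # False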
| 590 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.Array) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
        )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
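
    # Illustrative usage sketch (an assumption, not part of the file: the class
    # is used like any Flax module, with params produced by `init_weights`):
    #
    #   params = controlnet.init_weights(jax.random.PRNGKey(0))
    #   down_res, mid_res = controlnet.apply(
    #       {"params": params}, sample, timesteps, encoder_hidden_states,
    #       controlnet_cond, return_dict=False,
    #   )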
| 590 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    # helper so that dataclass fields can default to a (mutable) list
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."}
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt"
        )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt"
        )

        # replace padding with -100 to ignore these positions in the loss
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels
        return batch
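
# For a batch of features carrying "input_values" and "labels", the collator
# above returns padded "input_values" plus "labels" in which every padded
# position is -100, the index the CTC loss is told to ignore.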
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs, scaling the CTC loss as configured.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names
    )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
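
    # vocab.json now maps each corpus character to an id, with " " replaced by
    # the word delimiter "|" and [UNK]/[PAD] appended at the end.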
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|"
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer)
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)
    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
| 710 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
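
# The mask above is 255 in the tile interior and ramps linearly down to 0 over
# the `overlap_pixels` border, so overlapping upscaled tiles are alpha-blended;
# borders listed in `remove_borders` (tiles touching the image edge) are cut off.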
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
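
# Quick worked example for the rect helpers (illustrative):
#   add_overlap_rect((0, 0, 128, 128), 32, (512, 512)) == (0, 0, 160, 160)
# i.e. the crop grows by the border on every side and is clamped to the image.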
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 50, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, tile_size: int = 128, tile_border: int = 32, original_image_slice: int = 32):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 472 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
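
# The table-transformer checkpoints use DETR's hidden size of 256, so rows
# [:256], [256:512] and [-256:] of each stacked in_proj matrix correspond to
# the query, key and value projections respectively.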
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original model's weights to our Table Transformer structure.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a_ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 480 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """
    Check whether a binary tree is a valid binary search tree.
    """

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
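
# Minimal usage example (illustrative):
#   valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#   invalid = TreeNode(2.0, TreeNode(5.0), TreeNode(3.0))  # 5.0 on the left of 2.0
#   assert is_binary_search_tree(valid) and not is_binary_search_tree(invalid)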
if __name__ == "__main__":
import doctest
doctest.testmod()
| 480 | 1 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
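
# For a prime modulus p and b not divisible by p, Fermat's little theorem gives
# b ** (p - 2) ≡ b ** -1 (mod p), which is what the demo below relies on.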
# a prime number
p = 701

a = 1_000_000_000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 71 | 0 |
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the valid successors of `parent` (inside the grid, free cell)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
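
# The two frontiers advance in lockstep and each one re-targets the other's
# current node, so the searches meet near the middle of the grid instead of
# one search having to expand a full tree on its own.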
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCamelCase__ = (0, 0)
UpperCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase__ = time.time()
UpperCamelCase__ = BreadthFirstSearch(init, goal)
UpperCamelCase__ = bfs.search()
UpperCamelCase__ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
UpperCamelCase__ = time.time()
UpperCamelCase__ = BidirectionalBreadthFirstSearch(init, goal)
UpperCamelCase__ = bd_bfs.search()
UpperCamelCase__ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 75 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
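
# Each yielded item is a (job_title, company_name) pair, e.g. (illustrative):
#   ("Android Developer", "Acme Corp")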
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 55 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCamelCase__( unittest.TestCase):
@cached_property
def lowerCAmelCase__ ( self: str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCamelCase_ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
# verify the logits
__lowerCamelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__lowerCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
__lowerCamelCase = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" )
__lowerCamelCase = model(**UpperCamelCase_ )
__lowerCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
__lowerCamelCase = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
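# Added sketch (not from the original test suite): the same checkpoint can also be
# driven through the high-level `pipeline` API; only the checkpoint id is taken from
# the accelerate test above, the rest is generic transformers usage.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/vit-hybrid-base-bit-384")
    preds = classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")
    print(preds[0]["label"])  # expected to be a cat class such as "tabby, tabby cat"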
| 711 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
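# Added sketch (assumption-laden): how a dynamic-axes mapping like `BertOnnxConfig.inputs`
# is typically consumed by `torch.onnx.export`; the checkpoint and output path below are
# illustrative, not taken from this file.
if __name__ == "__main__":
    import torch
    from transformers import BertModel, BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # return_dict=False so the forward pass returns plain tuples, which ONNX export can trace
    model = BertModel.from_pretrained("bert-base-uncased", return_dict=False)
    dummy = tokenizer("Hello ONNX", return_tensors="pt")
    dynamic_axis = {0: "batch", 1: "sequence"}
    torch.onnx.export(
        model,
        (dummy["input_ids"], dummy["attention_mask"], dummy["token_type_ids"]),
        "bert.onnx",
        input_names=["input_ids", "attention_mask", "token_type_ids"],
        dynamic_axes={name: dynamic_axis for name in ["input_ids", "attention_mask", "token_type_ids"]},
    )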
| 80 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
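# Note (added): `to_2tuple` normalizes scalar-or-pair sizes before the patch-count math
# further down, e.g. to_2tuple(224) == (224, 224) while to_2tuple((224, 196)) == (224, 196).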
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model_kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

            fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
            self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
                self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

            pt_model_loaded.to(torch_device)
            pt_model_loaded.eval()

            with torch.no_grad():
                pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

            self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
            for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
                self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.228_4727, 0.310_4122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
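# Added sketch (hedged): a minimal end-to-end use of the dual-encoder API exercised above,
# reusing the tiny test checkpoints and the tensor helpers imported at the top of this file.
if __name__ == "__main__":
    demo_model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
        "hf-internal-testing/tiny-random-vit",
        "hf-internal-testing/tiny-bert",
        vision_from_pt=True,
        text_from_pt=True,
    )
    vision_cfg = demo_model.config.vision_config
    demo_inputs = {
        "pixel_values": floats_tensor([1, vision_cfg.num_channels, vision_cfg.image_size, vision_cfg.image_size]),
        "input_ids": ids_tensor([1, 4], demo_model.config.text_config.vocab_size),
        "attention_mask": random_attention_mask([1, 4]),
    }
    demo_out = demo_model(**demo_inputs)
    print(demo_out.logits_per_image.shape)  # (num_images, num_texts)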
| 85 |
from __future__ import annotations
import requests
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit through the public JSON endpoint."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
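# Added helper (hedged sketch): the endpoint above rate-limits anonymous clients with
# HTTP 429, so a simple retry wrapper with exponential backoff can help; the schedule
# below is illustrative, not part of the original script.
def get_subreddit_data_with_retry(subreddit: str, retries: int = 3, **kwargs) -> dict:
    import time

    for attempt in range(retries):
        try:
            return get_subreddit_data(subreddit, **kwargs)
        except requests.HTTPError:
            time.sleep(2**attempt)  # back off 1s, 2s, 4s, ...
    raise requests.HTTPError(f"still rate limited after {retries} attempts")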
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 625 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
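# Added note: the `_LazyModule` indirection keeps importing the package cheap. A hedged
# sketch of what it buys (no torch import happens until an attribute is touched):
#
#   from transformers.models import clipseg   # fast, only the import structure is read
#   model_cls = clipseg.CLIPSegModel           # the torch-backed module is imported here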
| 709 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure["modeling_maskformer_swin"] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 107 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the dotted module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that declare `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of some test class."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in a test file that use a given model class."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes exercising a given model class."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in a test file to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes that use it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the model tester classes that exercise it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make objects (e.g. classes) JSON-serializable by falling back to their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 670 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
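# Added summary (hedged): the tracking pattern this script builds up to, condensed; the
# project name and metric values below are illustrative only.
#
#   accelerator = Accelerator(log_with="all", project_dir="logs")
#   accelerator.init_trackers("my_project", config={"lr": 2e-5})
#   accelerator.log({"train_loss": 0.42}, step=0)
#   accelerator.end_training()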
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
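# Added usage note (the file name is hypothetical):
#   python tracking_example.py --with_tracking
#   accelerate launch tracking_example.py --with_tracking --project_dir logs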
| 332 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
def _snake_case ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
_UpperCAmelCase = [1] * len(self.prefix_tokens )
_UpperCAmelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones
def _snake_case ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self : Union[str, Any] , __UpperCamelCase : Dict ) ->None:
'''simple docstring'''
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_UpperCAmelCase = {}
_UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def _snake_case ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase = Path(__UpperCamelCase )
assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
_UpperCAmelCase = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_UpperCAmelCase = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , __UpperCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __UpperCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (str(__UpperCamelCase ), str(__UpperCamelCase ))
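# Added sketch (hedged; the file paths are illustrative) of how the pieces above fit together:
#
#   tokenizer = Speech2TextTokenizer(vocab_file="vocab.json", spm_file="sentencepiece.bpe.model",
#                                    lang_codes="mustc", tgt_lang="fr")
#   ids = tokenizer("c'est une phrase")["input_ids"]   # begins with the <lang:fr> prefix token
#   tokenizer.tgt_lang = "de"                          # swaps the prefix via the setter above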
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 19 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    """Solve an ODE dy/dx = ode_func(x, y) with the modified Euler (Heun) predictor-corrector scheme."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (explicit Euler)
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # corrector step (trapezoidal average of the two slopes)
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        x += step_size

    return y
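# Added worked check (hedged): for dy/dx = y with y(0) = 1 the exact solution is e**x, so
#   euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)[-1]
# should come out close to e ≈ 2.71828; the corrector makes the global error shrink
# roughly like step_size**2 (second order), versus step_size for plain explicit Euler.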
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 19 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 413 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCamelCase = ""
__lowerCamelCase = ""
__lowerCamelCase = ""
__lowerCamelCase = 1 # (0 is vertical, 1 is horizontal)
def lowercase ( ) -> None:
__magic_name__ , __magic_name__ = get_dataset(__UpperCamelCase , __UpperCamelCase )
print('''Processing...''' )
__magic_name__ , __magic_name__ , __magic_name__ = update_image_and_anno(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for index, image in enumerate(__UpperCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__magic_name__ = random_chars(32 )
__magic_name__ = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__magic_name__ = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(f'''/{file_root}.jpg''' , __UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Success {index+1}/{len(__UpperCamelCase )} with {file_name}''' )
__magic_name__ = []
for anno in new_annos[index]:
__magic_name__ = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__UpperCamelCase )
with open(f'''/{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 490 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = 0
@slow
def lowercase ( self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCAmelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCAmelCase_ ) , 0 )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
# Check that tokenizer_type ≠ model_type
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase ( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(tmp_dir , "vocab.txt" ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="bert" , use_fast=False )
            self.assertIsInstance(tokenizer , BertTokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(tmp_dir , "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(tmp_dir , "merges.txt" ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="gpt2" , use_fast=False )
            self.assertIsInstance(tokenizer , GPT2Tokenizer )
    @require_tokenizers
    def lowercase ( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(tmp_dir , "vocab.txt" ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="bert" )
            self.assertIsInstance(tokenizer , BertTokenizerFast )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(tmp_dir , "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(tmp_dir , "merges.txt" ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="gpt2" )
            self.assertIsInstance(tokenizer , GPT2TokenizerFast )
def lowercase ( self ):
with pytest.raises(UpperCAmelCase_ ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def lowercase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            if isinstance(tokenizer , BertTokenizer ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , False )
            else:
                self.assertEqual(tokenizer.do_lower_case , False )
            self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
                tokenizer = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def lowercase ( self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name )
@require_tokenizers
def lowercase ( self ):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=False ) , BertTokenizer )
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , BertTokenizerFast )
@require_tokenizers
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = "Hello, world. How are you?"
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertEqual("[UNK]" , tokens[0] )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def lowercase ( self ):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir )
        self.assertIsInstance(tokenizer2 , tokenizer.__class__ )
        self.assertEqual(tokenizer2.vocab_size , 12 )
    def lowercase ( self ):
        tokenizer = AutoTokenizer.from_pretrained("ctrl" )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer , CTRLTokenizer )
def lowercase ( self ):
# Check we can load the tokenizer config of an online model.
_SCREAMING_SNAKE_CASE = get_tokenizer_config("bert-base-cased" )
_SCREAMING_SNAKE_CASE = config.pop("_commit_hash" , UpperCAmelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCAmelCase_ , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_SCREAMING_SNAKE_CASE = get_tokenizer_config(UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = get_tokenizer_config(UpperCAmelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def lowercase ( self ):
try:
AutoConfig.register("custom" , UpperCAmelCase_ )
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CustomTokenizer.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase ( self ):
try:
AutoConfig.register("custom" , UpperCAmelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCAmelCase_ , fast_tokenizer_class=UpperCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ , fast_tokenizer_class=UpperCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoTokenizer.register(UpperCAmelCase_ , fast_tokenizer_class=UpperCAmelCase_ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained(UpperCAmelCase_ )
bert_tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CustomTokenizerFast.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase ( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=False )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=True , use_fast=False )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True , use_fast=False )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def lowercase ( self ):
        class NewTokenizer ( BertTokenizer ):
            special_attribute_present = False
        class NewTokenizerFast ( BertTokenizerFast ):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=False )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=False , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=True )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertTrue(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=True , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase ( self ):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=True , use_fast=False )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def lowercase ( self ):
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
            tokenizer = AutoTokenizer.from_pretrained("bert-base" )
def lowercase ( self ):
        with self.assertRaisesRegex(
            EnvironmentError , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
def lowercase ( self ):
# Make sure we have cached the tokenizer.
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 711 |
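# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file above): the minimal custom
# tokenizer registration flow those tests exercise. `MyConfig` and
# `MyTokenizer` are hypothetical stand-ins for user-defined classes.
#
# from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig
#
# class MyConfig(PretrainedConfig):
#     model_type = "my-model"
#
# class MyTokenizer(BertTokenizer):
#     pass
#
# AutoConfig.register("my-model", MyConfig)
# AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# tokenizer = AutoTokenizer.from_pretrained("path/to/my-model-checkpoint")  # resolves to MyTokenizer
# ---------------------------------------------------------------------------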
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_0( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 100
    @property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
    def test_kandinsky_img2img( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image ) | 493 | 0 |
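# ---------------------------------------------------------------------------
# Hedged usage sketch mirroring the slow test above: Kandinsky 2.1 img2img is
# a two-stage pipeline — the prior maps text to CLIP image embeddings, then
# the decoder denoises the init image conditioned on them. Checkpoint ids are
# the ones used in the test; everything else here is illustrative.
#
# import torch
# from diffusers import KandinskyPriorPipeline, KandinskyImg2ImgPipeline
# from diffusers.utils import load_image
#
# prior = KandinskyPriorPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
# decoder = KandinskyImg2ImgPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16).to("cuda")
# image_emb, negative_emb = prior("A red cartoon frog, 4k").to_tuple()
# init = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png")
# out = decoder("A red cartoon frog, 4k", image=init, image_embeds=image_emb,
#               negative_image_embeds=negative_emb, strength=0.2, output_type="np")
# ---------------------------------------------------------------------------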
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester ( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , ):
        '''simple docstring'''
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
return config, pixel_values, labels
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head( self ):
        '''simple docstring'''
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="np" ).pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196) , dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array(
            [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2 ) )
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="np" )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([-1.2_385, -1.0_987, -1.0_108] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        '''simple docstring'''
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="np" )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([1.6_881, -0.2_787, 0.5_901] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
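# ---------------------------------------------------------------------------
# Minimal runnable sketch of the JIT pattern the tests above rely on: compile
# a forward function once with jax.jit, then compare against eager execution
# under jax.disable_jit(). The toy function is illustrative, not a BEiT model.
import jax
import jax.numpy as jnp

@jax.jit
def _demo_forward(pixel_values):
    # stand-in for model(pixel_values=...)
    return (pixel_values * 2.0).sum(axis=-1)

_x = jnp.ones((1, 3, 4))
_jitted = _demo_forward(_x)            # compiled path
with jax.disable_jit():
    _eager = _demo_forward(_x)         # interpreted path, same numerics expected
assert _jitted.shape == _eager.shape
# ---------------------------------------------------------------------------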
| 243 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowercase__ : str = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
lowercase__ : str = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
lowercase__ : Dict = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : Dict ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = 0.0
for i, j in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
n_correct += 1.0 if math_equivalence.is_equiv(lowerCAmelCase__ , lowerCAmelCase__ ) else 0.0
_UpperCamelCase = n_correct / len(lowerCAmelCase__ )
return {
"accuracy": accuracy,
}
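# ---------------------------------------------------------------------------
# Usage sketch for the metric above (mirrors its own docstring): predictions
# and references are LaTeX/natural-language strings, canonicalized before the
# exact-match comparison.
# import datasets
# metric = datasets.load_metric("competition_math")
# results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
# print(results)  # {'accuracy': 1.0}
# ---------------------------------------------------------------------------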
| 98 | 0 |
"""simple docstring"""
def gcd(a: int , b: int ) -> int:
    """simple docstring"""
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int , m: int ) -> int:
    """simple docstring"""
    if gcd(a , m ) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 374 |
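# Worked example for the helpers above (the names gcd/find_mod_inverse are the
# reconstructed ones used in this file): 3 * 5 = 15 ≡ 1 (mod 7), so the
# modular inverse of 3 modulo 7 is 5.
# assert gcd(24, 40) == 8
# assert find_mod_inverse(3, 7) == 5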
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++ logging before it is imported below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None) | 374 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def __A ( voltage: float , current: float , power: float ) -> tuple:
    result = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
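# Quick usage sketch for __A above: pass exactly one of the three quantities
# as 0 and the function solves for it via P = V * I.
# >>> __A(voltage=0, current=2, power=4)
# result(name='voltage', value=2.0)
# >>> __A(voltage=2, current=4, power=0)
# result(name='power', value=8.0)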
| 414 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print("\n".join(str(row ) for row in solutions ) )
    else:
        print("No solution exists!" )
    return solved
def run_maze(maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
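# Illustrative usage of the solver above (solve_maze/run_maze are the
# reconstructed names used in this file). 0 = open cell, 1 = blocked; the
# rat walks from the top-left corner to the bottom-right corner.
# demo_maze = [
#     [0, 1, 0, 0],
#     [0, 0, 0, 1],
#     [1, 0, 1, 0],
#     [1, 0, 0, 0],
# ]
# solve_maze(demo_maze)  # prints the 0/1 path matrix and returns True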
| 414 | 1 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowercase = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers ( Enum ):
    """simple docstring"""
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
lowercase__ = 5
@dataclass
class FlaxSchedulerOutput ( BaseOutput ):
    """simple docstring"""
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin :
    """simple docstring"""
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ):
        '''simple docstring'''
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler, unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , '''create_state''' ) and getattr(scheduler , '''has_state''' , False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ):
        '''simple docstring'''
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
@property
    def compatibles( self ):
        '''simple docstring'''
        return self._get_compatibles()
    @classmethod
    def _get_compatibles( cls ):
        '''simple docstring'''
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
return compatible_classes
def broadcast_to_shape_from_left(x , shape ):
    """simple docstring"""
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.9_9_9 , dtype=jnp.float32 ):
    """simple docstring"""
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class CommonSchedulerState:
    """simple docstring"""
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
    @classmethod
    def create( cls , scheduler ):
        '''simple docstring'''
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod(state , original_samples , noise , timesteps ):
    """simple docstring"""
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state , original_samples , noise , timesteps ):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state , sample , noise , timesteps ):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 701 | """simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols('''ct x y z''')
def beta(velocity: float ) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def gamma(velocity: float ) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix(velocity: float ) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity: float , event: np.ndarray | None = None ) -> np.ndarray:
    """simple docstring"""
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print('''Example of four vector: ''')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
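    # Hedged numeric check for the functions above: at v = 0.5c, beta = 0.5 and
    # gamma = 1 / sqrt(0.75) ≈ 1.1547. transform scales a concrete event's first
    # component by c, so boosting (ct, x, y, z) = (1, 0, 0, 0) should yield
    # approximately (gamma * c, -gamma * beta * c, 0, 0):
    # boosted = transform(0.5 * c, event=np.array([1.0, 0.0, 0.0, 0.0]))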
| 296 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel ( metaclass=DummyObject ):
    _backends = ['onnx']
def __init__( self : Dict , *__magic_name__ : Optional[Any] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *__magic_name__ : str , **__magic_name__ : List[str] ):
"""simple docstring"""
requires_backends(cls , ["onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] , *__magic_name__ : Any , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ["onnx"] )
| 48 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
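    # Hedged note on the pattern above: _LazyModule defers the heavy framework
    # imports until an attribute is first accessed, so e.g.
    #   from transformers.models.encoder_decoder import EncoderDecoderConfig
    # stays cheap, and EncoderDecoderModel is only materialized (and torch only
    # imported) the first time it is looked up on the module.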
| 48 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int ) -> typing.Counter[int]:
    '''simple docstring'''
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1_000 ) -> int:
    '''simple docstring'''
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions")
| 711 | from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput ( ModelOutput ):
    """simple docstring"""
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig ( XLMRobertaConfig ):
    """simple docstring"""
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation ( RobertaPreTrainedModel ):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [r"""pooler""", r"""logit_scale"""]
    _keys_to_ignore_on_load_missing = [r"""position_ids""", r"""predictions.decoder.bias"""]
    base_model_prefix = """roberta"""
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , "has_pre_transformation" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_attentions=None , output_hidden_states=None , return_dict=None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            projection_state = outputs["hidden_states"][-2]
            projection_state = self.pre_LN(projection_state )
            projection_state = self.transformation_pre(projection_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 576 | 0 |
class PrefixSum:
    def __init__( self , array: list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start: int , end: int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum: int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
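# Quick usage sketch for PrefixSum above (the class name is the reconstructed
# one used in this file):
# ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
# ps.get_sum(1, 3)               # 2 + 3 + 4 = 9
# ps.contains_sum(5)             # True: contiguous subarray [2, 3]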
| 583 |
from math import factorial, pi
def maclaurin_sin(theta: float , accuracy: int = 30 ):
    if not isinstance(theta , (int, float) ):
        raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos(theta: float , accuracy: int = 30 ):
    if not isinstance(theta , (int, float) ):
        raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
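    # Sanity sketch: after the range reduction above, 30 series terms track the
    # standard library very closely.
    # import math
    # assert abs(maclaurin_sin(10) - math.sin(10)) < 1e-9
    # assert abs(maclaurin_cos(5) - math.cos(5)) < 1e-9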
| 583 | 1 |
import os
from math import log10
def solution(data_file: str = "base_exp.txt" ) -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a, x = list(map(int , line.split(''',''' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
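# Why comparing x * log10(a) works (sketch): log10 is strictly increasing, so
# a1**x1 > a2**x2  <=>  x1 * log10(a1) > x2 * log10(a2), while the logarithms
# stay tiny floats instead of million-digit integers. Small concrete check:
# from math import log10
# assert (8**3 < 32**2) == (3 * log10(8) < 2 * log10(32))  # 512 < 1024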
| 407 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase : Dict = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
_lowerCamelCase : Union[str, Any] = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class snake_case__ ( __snake_case ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
__A = GPTaTokenizer
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict="<|endoftext|>" , lowerCAmelCase_ : List[str]="<|endoftext|>" , lowerCAmelCase_ : str="<|endoftext|>" , lowerCAmelCase_ : List[str]=False , **lowerCAmelCase_ : str , ) -> int:
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase_ = kwargs.pop('''add_bos_token''' , lowerCAmelCase_ )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowerCAmelCase_ ) != add_prefix_space:
UpperCAmelCase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop('''type''' ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**lowerCAmelCase_ )
UpperCAmelCase_ = add_prefix_space
def UpperCamelCase ( self : Tuple , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Tuple ) -> BatchEncoding:
UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCamelCase ( self : Dict , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Optional[Any] ) -> BatchEncoding:
UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCamelCase ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def UpperCamelCase ( self : Optional[Any] , lowerCAmelCase_ : "Conversation" ) -> List[int]:
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) + [self.eos_token_id] )
if len(lowerCAmelCase_ ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
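
# --- Editor usage sketch (assumes the `transformers` package and a network/cache for
# the "gpt2" files; not part of the dataset row above). The assertions in the class
# above require add_prefix_space=True whenever inputs are pre-split into words:

from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding["input_ids"])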
| 407 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowercase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=sys.maxsize ) -> Any:
A : List[Any] = '''bilinear'''
A : List[str] = max_size
A : str = short_edge_length
def __call__( self , __UpperCAmelCase ) -> str:
A : Tuple = []
for img in imgs:
A , A : str = img.shape[:2]
# later: provide list and randomly choose index for resize
A : Optional[int] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
A : Union[str, Any] = size * 1.0 / min(__UpperCAmelCase , __UpperCAmelCase )
if h < w:
A , A : Union[str, Any] = size, scale * w
else:
A , A : List[Any] = scale * h, size
if max(__UpperCAmelCase , __UpperCAmelCase ) > self.max_size:
A : Optional[Any] = self.max_size * 1.0 / max(__UpperCAmelCase , __UpperCAmelCase )
A : Optional[Any] = newh * scale
A : Optional[int] = neww * scale
A : List[Any] = int(neww + 0.5 )
A : Tuple = int(newh + 0.5 )
if img.dtype == np.uinta:
A : List[Any] = Image.fromarray(__UpperCAmelCase )
A : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
A : Union[str, Any] = np.asarray(__UpperCAmelCase )
else:
                A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
A : Dict = nn.functional.interpolate(
__UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=__UpperCAmelCase ).squeeze(0 )
img_augs.append(__UpperCAmelCase )
return img_augs
class __lowercase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase ) -> List[str]:
A : Tuple = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
A : Optional[int] = cfg.INPUT.FORMAT
A : Union[str, Any] = cfg.SIZE_DIVISIBILITY
A : List[Any] = cfg.PAD_VALUE
A : str = cfg.INPUT.MAX_SIZE_TEST
A : Optional[int] = cfg.MODEL.DEVICE
A : List[Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
A : Optional[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
A : Tuple = lambda __UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std
def snake_case ( self , __UpperCAmelCase ) -> Optional[int]:
A : Optional[int] = tuple(max(__UpperCAmelCase ) for s in zip(*[img.shape for img in images] ) )
A : Optional[Any] = [im.shape[-2:] for im in images]
A : Optional[Any] = [
nn.functional.pad(
__UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(__UpperCAmelCase , __UpperCAmelCase )
]
return torch.stack(__UpperCAmelCase ), torch.tensor(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase=False ) -> Tuple:
with torch.no_grad():
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
A : List[str] = [images]
if single_image:
assert len(__UpperCAmelCase ) == 1
for i in range(len(__UpperCAmelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(__UpperCAmelCase , images.pop(__UpperCAmelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
__UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(__UpperCAmelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
A : Any = torch.tensor([im.shape[:2] for im in images] )
A : Tuple = self.aug(__UpperCAmelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
A : Optional[int] = [self.normalizer(__UpperCAmelCase ) for x in images]
# now pad them to do the following operations
A , A : List[str] = self.pad(__UpperCAmelCase )
            # padding to a multiple of size_divisibility is not implemented here
if self.size_divisibility > 0:
raise NotImplementedError()
            # per-image (y, x) scale factors between the original and resized sizes
A : Optional[Any] = torch.true_divide(__UpperCAmelCase , __UpperCAmelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ):
assert torch.isfinite(lowerCamelCase_ ).all(), "Box tensor contains infinite or NaN!"
A , A : List[Any] = box_size
tensor[:, 0].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 1].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 2].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 3].clamp_(min=0 , max=lowerCamelCase_ )
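
# --- Editor sketch (illustrative; distills the ResizeShortestEdge arithmetic above). ---
# The shorter image side is scaled to `size`; if the longer side would then exceed
# `max_size`, both sides are shrunk again, and the result is rounded to integers.

def resize_shortest_edge(h: int, w: int, size: int, max_size: int) -> tuple[int, int]:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        shrink = max_size / max(newh, neww)
        newh, neww = newh * shrink, neww * shrink
    return int(newh + 0.5), int(neww + 0.5)

assert resize_shortest_edge(480, 640, 600, 1000) == (600, 800)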
| 542 |
def snake_case__ ( lowerCamelCase_ = 1000 ):
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"{solution() = }")
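
# --- Editor sketch (illustrative). The brute-force sum above has an O(1) closed form:
# by inclusion-exclusion, sum the arithmetic series of multiples of 3 and of 5 below
# the limit, then subtract the multiples of 15 counted twice.

def multiples_sum(limit: int = 1000) -> int:
    def series(k: int) -> int:
        m = (limit - 1) // k  # number of positive multiples of k below limit
        return k * m * (m + 1) // 2
    return series(3) + series(5) - series(15)

assert multiples_sum(10) == 23  # 3 + 5 + 6 + 9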
| 542 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class snake_case ( __UpperCAmelCase ):
lowerCamelCase__ = '''marian'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , _lowerCamelCase :List[Any]=5_8_1_0_1 , _lowerCamelCase :Union[str, Any]=None , _lowerCamelCase :Tuple=1_0_2_4 , _lowerCamelCase :Optional[int]=1_2 , _lowerCamelCase :str=4_0_9_6 , _lowerCamelCase :Optional[Any]=1_6 , _lowerCamelCase :int=1_2 , _lowerCamelCase :List[str]=4_0_9_6 , _lowerCamelCase :int=1_6 , _lowerCamelCase :List[str]=0.0 , _lowerCamelCase :int=0.0 , _lowerCamelCase :Optional[int]=True , _lowerCamelCase :str=True , _lowerCamelCase :int="gelu" , _lowerCamelCase :Optional[int]=1_0_2_4 , _lowerCamelCase :Optional[Any]=0.1 , _lowerCamelCase :List[str]=0.0 , _lowerCamelCase :Optional[int]=0.0 , _lowerCamelCase :List[Any]=0.0_2 , _lowerCamelCase :Tuple=5_8_1_0_0 , _lowerCamelCase :Any=False , _lowerCamelCase :Any=5_8_1_0_0 , _lowerCamelCase :Optional[Any]=0 , _lowerCamelCase :Any=0 , _lowerCamelCase :Any=True , **_lowerCamelCase :List[str] , ):
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : int = decoder_vocab_size or vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[Any] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE : Any = encoder_layers
__SCREAMING_SNAKE_CASE : Optional[int] = encoder_attention_heads
__SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE : Dict = decoder_layers
__SCREAMING_SNAKE_CASE : Tuple = decoder_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = dropout
__SCREAMING_SNAKE_CASE : Any = attention_dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout
__SCREAMING_SNAKE_CASE : Any = activation_function
__SCREAMING_SNAKE_CASE : int = init_std
__SCREAMING_SNAKE_CASE : Tuple = encoder_layerdrop
__SCREAMING_SNAKE_CASE : str = decoder_layerdrop
__SCREAMING_SNAKE_CASE : Optional[Any] = use_cache
__SCREAMING_SNAKE_CASE : List[str] = encoder_layers
__SCREAMING_SNAKE_CASE : Any = scale_embedding # scale factor will be sqrt(d_model) if True
__SCREAMING_SNAKE_CASE : Union[str, Any] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
class snake_case ( __UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
__SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''decoder_sequence'''}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = self.num_layers
for i in range(_lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : List[str] = super().outputs
else:
__SCREAMING_SNAKE_CASE : int = super(_lowerCamelCase , self ).outputs
if self.use_past:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.num_layers
for i in range(_lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE_ ( self :Dict , _lowerCamelCase :PreTrainedTokenizer , _lowerCamelCase :int = -1 , _lowerCamelCase :int = -1 , _lowerCamelCase :bool = False , _lowerCamelCase :Optional[TensorType] = None , ):
__SCREAMING_SNAKE_CASE : str = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE : Tuple = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE : Union[str, Any] = dict(**_lowerCamelCase , **_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = common_inputs['''input_ids'''].shape
__SCREAMING_SNAKE_CASE : Tuple = common_inputs['''decoder_input_ids'''].shape[1]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.num_attention_heads
__SCREAMING_SNAKE_CASE : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : Tuple = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE : str = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.num_layers
__SCREAMING_SNAKE_CASE : int = min(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers
__SCREAMING_SNAKE_CASE : Any = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE : Tuple = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowerCamelCase , _lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self :str , _lowerCamelCase :PreTrainedTokenizer , _lowerCamelCase :int = -1 , _lowerCamelCase :int = -1 , _lowerCamelCase :bool = False , _lowerCamelCase :Optional[TensorType] = None , ):
__SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE : str = seqlen + 2
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = self.num_attention_heads
__SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
__SCREAMING_SNAKE_CASE : Tuple = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self :int , _lowerCamelCase :PreTrainedTokenizer , _lowerCamelCase :int = -1 , _lowerCamelCase :int = -1 , _lowerCamelCase :bool = False , _lowerCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE : Optional[int] = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE : List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE : Any = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self :List[str] , _lowerCamelCase :PreTrainedTokenizer , _lowerCamelCase :int = -1 , _lowerCamelCase :int = -1 , _lowerCamelCase :bool = False , _lowerCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self :Any , _lowerCamelCase :Dict , _lowerCamelCase :List[str] , _lowerCamelCase :Dict , _lowerCamelCase :Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : List[str] = super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = super(_lowerCamelCase , self )._flatten_past_key_values_(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@property
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
return 1e-4
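
# --- Editor sketch (illustrative shape arithmetic; the head count and sizes below are
# assumed, not read from a real checkpoint). The dummy past_key_values built above are
# zero tensors shaped (batch, num_heads, past_length, hidden_size // num_heads):

import torch

batch, num_heads, past_length, hidden_size = 2, 16, 5, 1024
kv_shape = (batch, num_heads, past_length, hidden_size // num_heads)
past_key_values = [(torch.zeros(kv_shape), torch.zeros(kv_shape)) for _ in range(6)]  # one (key, value) pair per layer
assert past_key_values[0][0].shape == (2, 16, 5, 64)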
| 401 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
lowerCamelCase__ = '''nat'''
lowerCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Any , _lowerCamelCase :int=4 , _lowerCamelCase :List[str]=3 , _lowerCamelCase :Optional[int]=6_4 , _lowerCamelCase :Optional[Any]=[3, 4, 6, 5] , _lowerCamelCase :Optional[int]=[2, 4, 8, 1_6] , _lowerCamelCase :str=7 , _lowerCamelCase :int=3.0 , _lowerCamelCase :Optional[Any]=True , _lowerCamelCase :List[str]=0.0 , _lowerCamelCase :str=0.0 , _lowerCamelCase :int=0.1 , _lowerCamelCase :int="gelu" , _lowerCamelCase :Dict=0.0_2 , _lowerCamelCase :str=1e-5 , _lowerCamelCase :List[Any]=0.0 , _lowerCamelCase :Optional[Any]=None , _lowerCamelCase :Dict=None , **_lowerCamelCase :Union[str, Any] , ):
super().__init__(**_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
__SCREAMING_SNAKE_CASE : int = num_channels
__SCREAMING_SNAKE_CASE : List[str] = embed_dim
__SCREAMING_SNAKE_CASE : List[str] = depths
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = num_heads
__SCREAMING_SNAKE_CASE : Any = kernel_size
__SCREAMING_SNAKE_CASE : Tuple = mlp_ratio
__SCREAMING_SNAKE_CASE : Union[str, Any] = qkv_bias
__SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Any = drop_path_rate
__SCREAMING_SNAKE_CASE : Dict = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__SCREAMING_SNAKE_CASE : List[Any] = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
__SCREAMING_SNAKE_CASE : Any = layer_scale_init_value
__SCREAMING_SNAKE_CASE : Tuple = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
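
# --- Editor usage sketch (assumes a `transformers` release that ships the NAT model). ---
# Instantiating the configuration above with defaults: num_layers is len(depths) and the
# derived hidden_size is embed_dim * 2 ** (num_stages - 1), i.e. 64 * 2**3 == 512.

from transformers import NatConfig

config = NatConfig()
print(config.num_layers, config.hidden_size)  # 4 512 with the defaults above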
| 401 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Optional[Any] = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ["""ConvNextFeatureExtractor"""]
__UpperCamelCase : List[str] = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
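
# --- Editor sketch (illustrative; the real `_LazyModule` has more machinery). The
# import structure above defers heavy imports until an attribute is first touched.
# The core pattern, reduced to importlib plus a module subclass:

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy_json = LazyModule("lazy_json", {"dumps": "json"})
assert lazy_json.dumps({"a": 1}) == '{"a": 1}'  # json is only imported here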
| 80 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : Union[str, Any] = False
class __UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe.dual_guided(
prompt="""first prompt""" , image=_lowerCAmelCase , text_to_image_strength=0.75 , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = VersatileDiffusionPipeline.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = generator.manual_seed(0 )
__lowercase = pipe.dual_guided(
prompt="""first prompt""" , image=_lowerCAmelCase , text_to_image_strength=0.75 , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """cyberpunk 2077"""
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe.dual_guided(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , text_to_image_strength=0.75 , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
__lowercase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__lowercase = """A painting of a squirrel eating a burger """
__lowercase = torch.manual_seed(0 )
__lowercase = pipe.text_to_image(
prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
__lowercase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__lowercase = pipe.image_variation(_lowerCAmelCase , generator=_lowerCAmelCase , output_type="""numpy""" ).images
__lowercase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 80 | 1 |
from __future__ import annotations
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , __UpperCamelCase : int ):
_UpperCAmelCase = order
# a_{0} ... a_{k}
_UpperCAmelCase = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_UpperCAmelCase = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_UpperCAmelCase = [0.0] * self.order
# y[n-1] ... y[n-k]
_UpperCAmelCase = [0.0] * self.order
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : list[float] , __UpperCamelCase : list[float] ):
if len(__UpperCamelCase ) < self.order:
_UpperCAmelCase = [1.0, *a_coeffs]
if len(__UpperCamelCase ) != self.order + 1:
_UpperCAmelCase = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(__UpperCamelCase )}'''
)
raise ValueError(__UpperCamelCase )
if len(__UpperCamelCase ) != self.order + 1:
_UpperCAmelCase = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(__UpperCamelCase )}'''
)
raise ValueError(__UpperCamelCase )
_UpperCAmelCase = a_coeffs
_UpperCAmelCase = b_coeffs
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : float ):
_UpperCAmelCase = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_UpperCAmelCase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_UpperCAmelCase = self.input_history[:-1]
_UpperCAmelCase = self.output_history[:-1]
_UpperCAmelCase = sample
_UpperCAmelCase = result
return result
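
# --- Editor sketch (illustrative). The class above is a direct-form I IIR filter:
#   y[n] = (b0*x[n] + sum_i b[i]*x[n-i] - sum_i a[i]*y[n-i]) / a[0]
# The same recurrence as a plain function, demonstrated on the one-pole low-pass
# y[n] = 0.5*x[n] + 0.5*y[n-1] driven by a unit impulse:

def iir_filter(samples: list[float], a: list[float], b: list[float]) -> list[float]:
    order = len(a) - 1
    x_hist, y_hist = [0.0] * order, [0.0] * order  # x[n-1..n-k], y[n-1..n-k]
    out = []
    for x in samples:
        acc = sum(b[i] * x_hist[i - 1] for i in range(1, order + 1))
        acc -= sum(a[i] * y_hist[i - 1] for i in range(1, order + 1))
        y = (acc + b[0] * x) / a[0]
        x_hist = [x] + x_hist[:-1]
        y_hist = [y] + y_hist[:-1]
        out.append(y)
    return out

assert iir_filter([1.0, 0.0, 0.0], a=[1.0, -0.5], b=[0.5, 0.0]) == [0.5, 0.25, 0.125]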
| 719 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__lowerCAmelCase = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__lowerCAmelCase = "UperNetConfig"
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : Union[int, Tuple[int, int]] , __UpperCamelCase : Union[int, Tuple[int, int], str] = 0 , __UpperCamelCase : bool = False , __UpperCamelCase : Union[int, Tuple[int, int]] = 1 , ):
super().__init__()
_UpperCAmelCase = nn.Convad(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , kernel_size=__UpperCamelCase , padding=__UpperCamelCase , bias=__UpperCamelCase , dilation=__UpperCamelCase , )
_UpperCAmelCase = nn.BatchNormad(__UpperCamelCase )
_UpperCAmelCase = nn.ReLU()
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : torch.Tensor ):
_UpperCAmelCase = self.conv(__UpperCamelCase )
_UpperCAmelCase = self.batch_norm(__UpperCamelCase )
_UpperCAmelCase = self.activation(__UpperCamelCase )
return output
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
super().__init__()
_UpperCAmelCase = [
nn.AdaptiveAvgPoolad(__UpperCamelCase ),
UperNetConvModule(__UpperCamelCase , __UpperCamelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__UpperCamelCase ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : torch.Tensor ):
_UpperCAmelCase = input
for layer in self.layers:
_UpperCAmelCase = layer(__UpperCamelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : Dict , __UpperCamelCase : Tuple[int, ...] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : bool ):
super().__init__()
_UpperCAmelCase = pool_scales
_UpperCAmelCase = align_corners
_UpperCAmelCase = in_channels
_UpperCAmelCase = channels
_UpperCAmelCase = []
for i, pool_scale in enumerate(__UpperCamelCase ):
_UpperCAmelCase = UperNetPyramidPoolingBlock(pool_scale=__UpperCamelCase , in_channels=__UpperCamelCase , channels=__UpperCamelCase )
self.blocks.append(__UpperCamelCase )
self.add_module(str(__UpperCamelCase ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : torch.Tensor ):
_UpperCAmelCase = []
for ppm in self.blocks:
_UpperCAmelCase = ppm(__UpperCamelCase )
_UpperCAmelCase = nn.functional.interpolate(
__UpperCamelCase , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
ppm_outs.append(__UpperCamelCase )
return ppm_outs
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple ):
super().__init__()
_UpperCAmelCase = config
_UpperCAmelCase = config.pool_scales # e.g. (1, 2, 3, 6)
_UpperCAmelCase = in_channels
_UpperCAmelCase = config.hidden_size
_UpperCAmelCase = False
_UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
_UpperCAmelCase = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
_UpperCAmelCase = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
_UpperCAmelCase = nn.ModuleList()
_UpperCAmelCase = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
_UpperCAmelCase = UperNetConvModule(__UpperCamelCase , self.channels , kernel_size=1 )
_UpperCAmelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__UpperCamelCase )
self.fpn_convs.append(__UpperCamelCase )
_UpperCAmelCase = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def UpperCAmelCase__ ( self : str ):
self.apply(self._init_weights )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : str ):
if isinstance(__UpperCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = inputs[-1]
_UpperCAmelCase = [x]
psp_outs.extend(self.psp_modules(__UpperCamelCase ) )
_UpperCAmelCase = torch.cat(__UpperCamelCase , dim=1 )
_UpperCAmelCase = self.bottleneck(__UpperCamelCase )
return output
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : torch.Tensor ):
# build laterals
_UpperCAmelCase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__UpperCamelCase ) )
# build top-down path
_UpperCAmelCase = len(__UpperCamelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
_UpperCAmelCase = laterals[i - 1].shape[2:]
_UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__UpperCamelCase , mode="bilinear" , align_corners=self.align_corners )
# build outputs
_UpperCAmelCase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
_UpperCAmelCase = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
_UpperCAmelCase = torch.cat(__UpperCamelCase , dim=1 )
_UpperCAmelCase = self.fpn_bottleneck(__UpperCamelCase )
_UpperCAmelCase = self.classifier(__UpperCamelCase )
return output
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : Dict , __UpperCamelCase : str , __UpperCamelCase : int = 2 , __UpperCamelCase : int = 3 , __UpperCamelCase : Union[int, Tuple[int, int]] = 1 ):
super().__init__()
_UpperCAmelCase = config
_UpperCAmelCase = config.auxiliary_in_channels
_UpperCAmelCase = config.auxiliary_channels
_UpperCAmelCase = config.auxiliary_num_convs
_UpperCAmelCase = config.auxiliary_concat_input
_UpperCAmelCase = in_index
_UpperCAmelCase = (kernel_size // 2) * dilation
_UpperCAmelCase = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__UpperCamelCase , padding=__UpperCamelCase , dilation=__UpperCamelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__UpperCamelCase , padding=__UpperCamelCase , dilation=__UpperCamelCase ) )
if self.num_convs == 0:
_UpperCAmelCase = nn.Identity()
else:
_UpperCAmelCase = nn.Sequential(*__UpperCamelCase )
if self.concat_input:
_UpperCAmelCase = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__UpperCamelCase , padding=kernel_size // 2 )
_UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def UpperCAmelCase__ ( self : List[Any] ):
self.apply(self._init_weights )
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[Any] ):
if isinstance(__UpperCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : torch.Tensor ):
# just take the relevant feature maps
_UpperCAmelCase = encoder_hidden_states[self.in_index]
_UpperCAmelCase = self.convs(__UpperCamelCase )
if self.concat_input:
_UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
_UpperCAmelCase = self.classifier(__UpperCamelCase )
return output
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Dict = UperNetConfig
__SCREAMING_SNAKE_CASE : str = """pixel_values"""
__SCREAMING_SNAKE_CASE : str = True
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : int ):
if isinstance(__UpperCamelCase , __UpperCamelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple=False ):
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = value
__lowerCAmelCase = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__lowerCAmelCase = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , lowercase , )
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : Optional[int] , __UpperCamelCase : str ):
super().__init__(__UpperCamelCase )
_UpperCAmelCase = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
_UpperCAmelCase = UperNetHead(__UpperCamelCase , in_channels=self.backbone.channels )
_UpperCAmelCase = UperNetFCNHead(__UpperCamelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
@replace_return_docstrings(output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[torch.Tensor] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[torch.Tensor] = None , __UpperCamelCase : Optional[bool] = None , ):
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
_UpperCAmelCase = self.backbone.forward_with_filtered_kwargs(
__UpperCamelCase , output_hidden_states=__UpperCamelCase , output_attentions=__UpperCamelCase )
_UpperCAmelCase = outputs.feature_maps
_UpperCAmelCase = self.decode_head(__UpperCamelCase )
_UpperCAmelCase = nn.functional.interpolate(__UpperCamelCase , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=__UpperCamelCase )
_UpperCAmelCase = None
if self.auxiliary_head is not None:
_UpperCAmelCase = self.auxiliary_head(__UpperCamelCase )
_UpperCAmelCase = nn.functional.interpolate(
__UpperCamelCase , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=__UpperCamelCase )
_UpperCAmelCase = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one" )
else:
# compute weighted loss
_UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
_UpperCAmelCase = loss_fct(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = loss_fct(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
_UpperCAmelCase = (logits,) + outputs[1:]
else:
_UpperCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__UpperCamelCase , logits=__UpperCamelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
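
# --- Editor sketch (illustrative; shapes only, no learned convolutions). The pyramid
# pooling module above average-pools the feature map at several output scales and
# bilinearly upsamples each result back to the input resolution before concatenation:

import torch
import torch.nn.functional as F

def pyramid_pool(features: torch.Tensor, scales=(1, 2, 3, 6)) -> list[torch.Tensor]:
    outs = []
    for s in scales:
        pooled = F.adaptive_avg_pool2d(features, output_size=s)
        outs.append(F.interpolate(pooled, size=features.shape[2:], mode="bilinear", align_corners=False))
    return outs

x = torch.randn(1, 8, 32, 32)
assert all(o.shape == x.shape for o in pyramid_pool(x))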
| 129 | 0 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCAmelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
UpperCAmelCase = None
def lowercase ( ) -> List[Any]:
_UpperCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=a__ , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=a__ , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowercase ( a__ : int ) -> List[Any]:
_UpperCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_UpperCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def lowercase ( a__ : Dict ) -> Optional[int]:
def remove_articles(a__ : Dict ):
return ARTICLES_REGEX.sub(''' ''' , a__ )
def white_space_fix(a__ : Tuple ):
return " ".join(text.split() )
def remove_punc(a__ : Optional[Any] ):
_UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(a__ : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a__ ) ) ) )
def lowercase ( a__ : List[str] ) -> List[str]:
if not s:
return []
return normalize_answer(a__ ).split()
def lowercase ( a__ : str , a__ : int ) -> str:
return int(normalize_answer(a__ ) == normalize_answer(a__ ) )
def lowercase ( a__ : List[str] , a__ : Optional[Any] ) -> int:
_UpperCamelCase = get_tokens(a__ )
_UpperCamelCase = get_tokens(a__ )
_UpperCamelCase = collections.Counter(a__ ) & collections.Counter(a__ )
_UpperCamelCase = sum(common.values() )
if len(a__ ) == 0 or len(a__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_UpperCamelCase = 1.0 * num_same / len(a__ )
_UpperCamelCase = 1.0 * num_same / len(a__ )
_UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def lowercase ( a__ : Optional[int] , a__ : Dict ) -> Tuple:
_UpperCamelCase = {}
_UpperCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_UpperCamelCase = qa['''id''']
_UpperCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(a__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_UpperCamelCase = ['''''']
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
_UpperCamelCase = preds[qid]
# Take max over all gold answers
_UpperCamelCase = max(compute_exact(a__ , a__ ) for a in gold_answers )
_UpperCamelCase = max(compute_fa(a__ , a__ ) for a in gold_answers )
return exact_scores, fa_scores
def lowercase ( a__ : Tuple , a__ : List[str] , a__ : Tuple , a__ : Union[str, Any] ) -> List[str]:
_UpperCamelCase = {}
for qid, s in scores.items():
_UpperCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
_UpperCamelCase = float(not qid_to_has_ans[qid] )
else:
_UpperCamelCase = s
return new_scores
def lowercase ( a__ : Optional[Any] , a__ : List[Any] , a__ : List[str]=None ) -> int:
if not qid_list:
_UpperCamelCase = len(a__ )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
_UpperCamelCase = len(a__ )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def lowercase ( a__ : Optional[Any] , a__ : Any , a__ : Tuple ) -> str:
for k in new_eval:
_UpperCamelCase = new_eval[k]
def lowercase ( a__ : Optional[Any] , a__ : int , a__ : Optional[Any] , a__ : List[Any] ) -> Any:
plt.step(a__ , a__ , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(a__ , a__ , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(a__ )
plt.savefig(a__ )
plt.clf()
def lowercase ( a__ : Tuple , a__ : Dict , a__ : Dict , a__ : List[str] , a__ : Optional[int]=None , a__ : Any=None ) -> List[str]:
_UpperCamelCase = sorted(a__ , key=lambda a__ : na_probs[k] )
_UpperCamelCase = 0.0
_UpperCamelCase = 1.0
_UpperCamelCase = 0.0
_UpperCamelCase = [1.0]
_UpperCamelCase = [0.0]
_UpperCamelCase = 0.0
for i, qid in enumerate(a__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_UpperCamelCase = true_pos / float(i + 1 )
_UpperCamelCase = true_pos / float(a__ )
if i == len(a__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(a__ )
recalls.append(a__ )
if out_image:
plot_pr_curve(a__ , a__ , a__ , a__ )
return {"ap": 100.0 * avg_prec}
def lowercase ( a__ : Any , a__ : Optional[Any] , a__ : Optional[int] , a__ : Union[str, Any] , a__ : Optional[int] , a__ : List[Any] ) -> Union[str, Any]:
if out_image_dir and not os.path.exists(a__ ):
os.makedirs(a__ )
_UpperCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_UpperCamelCase = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
_UpperCamelCase = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
_UpperCamelCase = {k: float(a__ ) for k, v in qid_to_has_ans.items()}
_UpperCamelCase = make_precision_recall_eval(
a__ , a__ , a__ , a__ , out_image=os.path.join(a__ , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(a__ , a__ , '''pr_exact''' )
merge_eval(a__ , a__ , '''pr_f1''' )
merge_eval(a__ , a__ , '''pr_oracle''' )
def lowercase ( a__ : str , a__ : Optional[Any] , a__ : int , a__ : Optional[Any] ) -> int:
if not qid_list:
return
_UpperCamelCase = [na_probs[k] for k in qid_list]
_UpperCamelCase = np.ones_like(a__ ) / float(len(a__ ) )
plt.hist(a__ , weights=a__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(a__ , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def lowercase ( a__ : Optional[int] , a__ : str , a__ : Optional[int] , a__ : List[Any] ) -> List[Any]:
_UpperCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_UpperCamelCase = num_no_ans
_UpperCamelCase = cur_score
_UpperCamelCase = 0.0
_UpperCamelCase = sorted(a__ , key=lambda a__ : na_probs[k] )
for i, qid in enumerate(a__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_UpperCamelCase = scores[qid]
else:
if preds[qid]:
_UpperCamelCase = -1
else:
_UpperCamelCase = 0
cur_score += diff
if cur_score > best_score:
_UpperCamelCase = cur_score
_UpperCamelCase = na_probs[qid]
return 100.0 * best_score / len(a__ ), best_thresh
def lowercase ( a__ : List[Any] , a__ : Any , a__ : Dict , a__ : List[Any] , a__ : Optional[Any] , a__ : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase , _UpperCamelCase = find_best_thresh(a__ , a__ , a__ , a__ )
_UpperCamelCase , _UpperCamelCase = find_best_thresh(a__ , a__ , a__ , a__ )
_UpperCamelCase = best_exact
_UpperCamelCase = exact_thresh
_UpperCamelCase = best_fa
_UpperCamelCase = fa_thresh
def lowercase ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
_UpperCamelCase = json.load(a__ )
_UpperCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
_UpperCamelCase = json.load(a__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_UpperCamelCase = json.load(a__ )
else:
_UpperCamelCase = {k: 0.0 for k in preds}
_UpperCamelCase = make_qid_to_has_ans(a__ ) # maps qid to True/False
_UpperCamelCase = [k for k, v in qid_to_has_ans.items() if v]
_UpperCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
_UpperCamelCase , _UpperCamelCase = get_raw_scores(a__ , a__ )
_UpperCamelCase = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
_UpperCamelCase = apply_no_ans_threshold(a__ , a__ , a__ , OPTS.na_prob_thresh )
_UpperCamelCase = make_eval_dict(a__ , a__ )
if has_ans_qids:
_UpperCamelCase = make_eval_dict(a__ , a__ , qid_list=a__ )
merge_eval(a__ , a__ , '''HasAns''' )
if no_ans_qids:
_UpperCamelCase = make_eval_dict(a__ , a__ , qid_list=a__ )
merge_eval(a__ , a__ , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(a__ , a__ , a__ , a__ , a__ , a__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(a__ , a__ , a__ , a__ , a__ , OPTS.out_image_dir )
histogram_na_prob(a__ , a__ , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(a__ , a__ , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(a__ , a__ )
else:
print(json.dumps(a__ , indent=2 ) )
if __name__ == "__main__":
UpperCAmelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
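
# --- Editor sketch (illustrative, mirrors the scoring logic above in isolation). ---
# Answers are lower-cased, stripped of punctuation and of the articles a/an/the, then
# compared as bags of tokens: F1 = 2PR / (P + R) over the token overlap.

import collections
import re
import string

def normalize(text: str) -> list[str]:
    text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return text.split()

def token_f1(gold: str, pred: str) -> float:
    gold_toks, pred_toks = normalize(gold), normalize(pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if not gold_toks or not pred_toks:
        return float(gold_toks == pred_toks)  # both empty -> 1.0, one empty -> 0.0
    if num_same == 0:
        return 0.0
    precision, recall = num_same / len(pred_toks), num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

assert abs(token_f1("the cat sat", "a cat sat down") - 0.8) < 1e-12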
 | 420 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
UpperCAmelCase = 6_37_81_37.0
UpperCAmelCase = 6_35_67_52.31_42_45
UpperCAmelCase = 6_378_137
def lowercase ( a__ : float , a__ : float , a__ : float , a__ : float ) -> float:
_UpperCamelCase = (AXIS_A - AXIS_B) / AXIS_A
_UpperCamelCase = atan((1 - flattening) * tan(radians(a__ ) ) )
_UpperCamelCase = atan((1 - flattening) * tan(radians(a__ ) ) )
_UpperCamelCase = radians(a__ )
_UpperCamelCase = radians(a__ )
# Equation
_UpperCamelCase = sin((phi_a - phi_a) / 2 )
_UpperCamelCase = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_UpperCamelCase = sqrt(sin_sq_phi + (cos(a__ ) * cos(a__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
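
# --- Editor sketch (illustrative; the coordinates below are assumed values for San
# Francisco and New York). The code above applies a flattening correction via reduced
# latitudes; the plain spherical haversine it is built on, with explicit names:

from math import asin, cos, radians, sin, sqrt

def haversine(lat1: float, lon1: float, lat2: float, lon2: float, radius: float = 6_378_137.0) -> float:
    phi1, phi2 = radians(lat1), radians(lat2)
    d_phi, d_lambda = radians(lat2 - lat1), radians(lon2 - lon1)
    h = sin(d_phi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(d_lambda / 2) ** 2
    return 2 * radius * asin(sqrt(h))

print(haversine(37.774856, -122.424227, 40.713019, -74.012647))  # roughly 4.1e6 metres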
| 420 | 1 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if not (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase )):
raise ValueError('longest_common_substring() takes two strings for inputs' )
lowercase = len(_UpperCAmelCase )
lowercase = len(_UpperCAmelCase )
lowercase = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
lowercase = 0
lowercase = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
lowercase = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
lowercase = i
lowercase = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
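
# --- Editor sketch (illustrative; same DP as above with readable names). dp[i][j]
# holds the length of the common suffix of text1[:i] and text2[:j]; the maximum cell
# marks where the longest common substring ends.

def lcs_substring(text1: str, text2: str) -> str:
    dp = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]
    best_len, best_end = 0, 0
    for i in range(1, len(text1) + 1):
        for j in range(1, len(text2) + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > best_len:
                    best_len, best_end = dp[i][j], i
    return text1[best_end - best_len : best_end]

assert lcs_substring("commonsubstring", "substrings") == "substring"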
| 314 |
import re
def __snake_case ( _UpperCAmelCase ):
"""simple docstring"""
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
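
# --- Editor sketch (illustrative). str.maketrans/translate above swap each base for
# its Watson-Crick complement (A<->T, C<->G); reversing as well gives the reverse
# complement commonly used in bioinformatics:

def reverse_complement(dna: str) -> str:
    return dna.translate(str.maketrans("ATCG", "TAGC"))[::-1]

assert reverse_complement("ATGC") == "GCAT"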
| 314 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
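# Launch sketch: the assert in create_accelerator() expects exactly two
# processes, e.g. (the script file name is illustrative):
#   accelerate launch --num_processes 2 test_even_batches.py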
| 75 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
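# Round-trip sketch against a fixture tokenizer like the one setUp() writes
# (IDs come from the tiny test fixture, so they are illustrative only;
# `monolingual_vocab_file` stands in for the path setUp() creates):
#   tok = BartphoTokenizer(SAMPLE_VOCAB, monolingual_vocab_file, unk_token="<unk>")
#   tok.convert_tokens_to_ids(tok.tokenize("This is a là test"))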
| 647 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device('cpu')


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'Saving model {swiftformer_name} to {pytorch_dump_folder_path}')
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swiftformer_name',
        default='swiftformer_xs',
        choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
        type=str,
        help='Name of the SwiftFormer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='./converted_outputs/',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
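# Invocation sketch (the checkpoint URL is hypothetical; official links live in
# the upstream SwiftFormer repository):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt https://example.com/swiftformer_xs.pth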
| 701 | """simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its string representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')

    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 map to the letters A-Z
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])

    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
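    # Usage sketch: 255 in base 16 renders as "FF", 10 in base 2 as "1010".
    print(decimal_to_any(255, 16), decimal_to_any(10, 2))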
| 217 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
    def test_xla_generate_fast(self):
        pass
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
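# Run sketch for the suites above (the test-file path is illustrative):
#   python -m pytest tests/models/led/test_modeling_tf_led.py -k "attention_outputs"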
| 697 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'{func}(grid=grid)', setup=setup, number=500)
        print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
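    # Usage sketch on the first fixture grid above: all three strategies count
    # the same 8 negative entries.
    print(count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]))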
| 468 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
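# Usage sketch (a deliberately tiny config; real checkpoints would instead be
# loaded with AutoencoderKL.from_pretrained):
#   vae = AutoencoderKL(block_out_channels=(32,), norm_num_groups=32, sample_size=32)
#   posterior = vae.encode(torch.randn(1, 3, 32, 32)).latent_dist
#   reconstruction = vae.decode(posterior.sample()).sample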
| 706 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
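# Note: the selectors above match Indeed's markup at the time this script was
# written; scraping code like this tends to break when the page layout changes.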
| 675 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def snake_case ( self ):
# fmt: off
__lowerCAmelCase = {"input_ids": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        original_tokenizer_text = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(original_tokenizer_text).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
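# Usage sketch mirroring the large-tokenizer assertions above (downloads the
# checkpoint, so it is illustrative rather than part of the test run):
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
#   tok("To ensure a smooth flow of bank resolutions.").input_ids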
| 636 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__UpperCAmelCase : Any = get_tests_dir("fixtures")
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case__ :str = mock.Mock()
snake_case__ :Optional[int] = 500
snake_case__ :List[str] = {}
snake_case__ :str = HTTPError
snake_case__ :Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ :Tuple = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Optional[int] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
snake_case__ :Tuple = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 241 | 0 |
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a: Optional[int] = False
a: Optional[Any] = False
a: Dict = False
a: str = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
    def test_attention_outputs(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 718 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def __snake_case ( _UpperCamelCase ) -> Dict:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCamelCase :List[str] = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # No text can be detected in this image, so layoutlmv2 should return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
_a = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
_a = INVOICE_URL
_a = '''What is the invoice number?'''
_a = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_a = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_a = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
_a = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
_a = INVOICE_URL
_a = '''What is the invoice number?'''
_a = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_a = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_a = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
_a = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCamelCase )
_a = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCamelCase , revision='''3dc6de3''' , )
_a = INVOICE_URL
_a = '''What is the invoice number?'''
_a = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
_a = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
_a = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
_a = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
_a = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
_a = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCamelCase )
_a = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCamelCase , revision='''3dc6de3''' , max_seq_len=50 , )
_a = INVOICE_URL
_a = '''What is the invoice number?'''
_a = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_a = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
_a = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
_a = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
_a = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
_a = INVOICE_URL
_a = '''What is the invoice number?'''
_a = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCamelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
pass
| 346 | 0 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if the value of any node in the min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prims_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prims_algorithm(adjacency_list))
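# Illustrative usage sketch (added for clarity; not part of the original
# script). For the triangle graph 0-1 (weight 1), 1-2 (weight 2) and
# 0-2 (weight 3), Prim's algorithm keeps the two cheapest edges.
def _demo() -> None:
    demo_adjacency_list = defaultdict(list)  # hypothetical example input
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        demo_adjacency_list[u].append([v, w])
        demo_adjacency_list[v].append([u, w])
    # Expected minimum spanning tree edges: [(0, 1), (1, 2)]
    print(prims_algorithm(demo_adjacency_list))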
| 370 | '''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return the list of all primes up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
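# Illustrative checks (a sketch, not part of the original module):
#     >>> prime_sieve(25)
#     [2, 3, 5, 7, 11, 13, 17, 19, 23]
#     >>> prime_sieve(10)
#     [2, 3, 5, 7]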
| 370 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Defaults to the model max input length for single sentence inputs (taking special tokens into account)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,SCREAMING_SNAKE_CASE__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from"
            " another script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
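# Illustrative invocation (a sketch; it assumes this file is saved as
# run_language_modeling.py, and the paths and model name are placeholders):
#
#   python run_language_modeling.py \
#       --model_name_or_path roberta-base \
#       --mlm \
#       --train_data_file ./data/train.txt \
#       --eval_data_file ./data/eval.txt \
#       --output_dir ./lm-finetune \
#       --do_train --do_eval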
| 715 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b using only additions and bit shifts (double-and-add)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Compute (a * b) % modulus with the same double-and-add scheme,
    reducing the accumulator modulo `modulus` at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
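# A small correctness sketch for the two helpers above (the `if __name__`
# guard is added here; it is not part of the original module):
if __name__ == "__main__":
    assert binary_multiply(13, 9) == 13 * 9
    assert binary_mod_multiply(13, 9, 7) == (13 * 9) % 7
    print("binary multiplication checks passed")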
| 693 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
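# A brief usage sketch (the variable name `task` is illustrative): this frozen
# dataclass is what `datasets` stores in `DatasetInfo.task_templates`, and
# `column_mapping` tells `Dataset.prepare_for_task` how to rename columns.
#
#   task = Summarization(text_column="article", summary_column="highlights")
#   assert task.column_mapping == {"article": "text", "highlights": "summary"}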
| 48 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # Transfer the weights of `self.src` to `self.dest` by performing a forward
        # pass with `x` as input; the Tracker records the operations in both modules.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
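# Illustrative invocation (a sketch; it assumes this file is saved as
# convert_resnet_to_pytorch.py and that `timm` is installed):
#
#   python convert_resnet_to_pytorch.py \
#       --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted-resnet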
| 318 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
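# Usage sketch (the checkpoint id is illustrative): thanks to the lazy module
# above, the heavyweight torch import only happens on first attribute access.
#
#   from transformers import TableTransformerForObjectDetection
#   model = TableTransformerForObjectDetection.from_pretrained(
#       "microsoft/table-transformer-detection"
#   )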
| 446 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string, this index
        # is the starting position of a substring equal to the pattern
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
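# Illustrative checks for the functions above (a sketch, not in the original):
#     >>> z_function("abacaba")
#     [0, 0, 1, 0, 3, 0, 1]
#     >>> find_pattern("aba", "abacaba")  # "aba" occurs at indices 0 and 4
#     2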
| 446 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string, this index
        # is the starting position of a substring equal to the pattern
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 0 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
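# Minimal usage sketch (the data directory is a placeholder; it assumes a
# local copy of SQuAD v1.1 with the standard file names):
#
#   from transformers.data import SquadV1Processor
#   processor = SquadV1Processor()
#   examples = processor.get_train_examples("path/to/squad")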
| 705 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_A : str = """sshleifer/mar_enro_6_3_student"""
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
lowercase : Tuple = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
lowercase : Tuple = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
lowercase : Union[str, Any] = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
lowercase : Optional[Any] = bash_script.replace(_a , str(_a ) )
lowercase : Any = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase : List[Any] = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase : Union[str, Any] = ["finetune.py"] + bash_script.split() + args
with patch.object(_a , "argv" , _a ):
lowercase : Dict = argparse.ArgumentParser()
lowercase : Optional[Any] = pl.Trainer.add_argparse_args(_a )
lowercase : List[str] = SummarizationModule.add_model_specific_args(_a , os.getcwd() )
lowercase : Tuple = parser.parse_args()
lowercase : Optional[Any] = main(_a )
# Check metrics
lowercase : int = load_json(model.metrics_save_path )
lowercase : Union[str, Any] = metrics["val"][0]
lowercase : Tuple = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _a )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.0_1 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase : List[str] = os.listdir(_a )
lowercase : Optional[Any] = [x for x in contents if x.endswith(".ckpt" )][0]
lowercase : str = os.path.join(args.output_dir , _a )
lowercase : int = torch.load(_a , map_location="cpu" )
lowercase : int = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
lowercase : List[Any] = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
lowercase : Union[str, Any] = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 128,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
lowercase : Optional[int] = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
lowercase : Tuple = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
lowercase : Optional[int] = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
lowercase : Union[str, Any] = bash_script.replace(_a , str(_a ) )
lowercase : Any = self.get_auto_remove_tmp_dir()
lowercase : str = bash_script.replace("--fp16" , "" )
lowercase : Any = 6
lowercase : Optional[int] = (
["distillation.py"]
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
"--gpus=1",
"--learning_rate=1e-3",
f"""--num_train_epochs={epochs}""",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(_a , "argv" , _a ):
lowercase : Optional[int] = argparse.ArgumentParser()
lowercase : List[str] = pl.Trainer.add_argparse_args(_a )
lowercase : Any = SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
lowercase : str = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase : Dict = distill_main(_a )
# Check metrics
lowercase : Tuple = load_json(model.metrics_save_path )
lowercase : int = metrics["val"][0]
lowercase : Tuple = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase : int = os.listdir(_a )
lowercase : Dict = [x for x in contents if x.endswith(".ckpt" )][0]
lowercase : List[str] = os.path.join(args.output_dir , _a )
lowercase : Any = torch.load(_a , map_location="cpu" )
lowercase : int = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 518 | 0 |
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
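# Illustrative doctest-style check (a sketch; not in the original module).
# Moving only right or down, the cheapest path costs 1 + 3 + 1 + 1 + 1 = 7:
#     >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#     7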
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        name = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        name = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        name = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        name = name.replace("vit.norm" , "vit.layernorm" )
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
UpperCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model''']
# load 🤗 model
UpperCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512
UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = outputs.logits, outputs.pred_boxes
UpperCAmelCase, UpperCAmelCase = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
UpperCAmelCase = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
UpperCAmelCase = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
UpperCAmelCase = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
UpperCAmelCase = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
UpperCAmelCase = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
UpperCAmelCase = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
UpperCAmelCase = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
UpperCAmelCase = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a__ : Optional[Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
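
# Editor's sketch: a typical invocation of this conversion script. The file
# name and paths below are hypothetical placeholders, not from the source:
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small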
| 51 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
def __init__( self :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :int=13 , lowerCamelCase_ :int=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :str=3 , lowerCamelCase_ :Any=16 , lowerCamelCase_ :Tuple=[1, 2, 1] , lowerCamelCase_ :Dict=[2, 2, 4] , lowerCamelCase_ :Dict=2 , lowerCamelCase_ :List[str]=2.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :Optional[Any]="gelu" , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Union[str, Any]=0.0_2 , lowerCamelCase_ :List[str]=1E-5 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :int=True , lowerCamelCase_ :Dict=10 , lowerCamelCase_ :List[Any]=8 , lowerCamelCase_ :Any=["stage1", "stage2", "stage3"] , lowerCamelCase_ :str=[1, 2, 3] , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : Dict = embed_dim
SCREAMING_SNAKE_CASE : Any = depths
SCREAMING_SNAKE_CASE : Union[str, Any] = num_heads
SCREAMING_SNAKE_CASE : Tuple = window_size
SCREAMING_SNAKE_CASE : Optional[int] = mlp_ratio
SCREAMING_SNAKE_CASE : Tuple = qkv_bias
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Tuple = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = encoder_stride
SCREAMING_SNAKE_CASE : Any = out_features
SCREAMING_SNAKE_CASE : Optional[int] = out_indices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()

        result = model(pixel_values )

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()

        result = model(pixel_values )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )

        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip('''Swin does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def __lowerCAmelCase ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = 3
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def __lowerCAmelCase ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                                '''Tuple and dict output are not equal. Difference:'''
                                f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"
                                f" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."
                            ) , )

                recursive_check(tuple_output , dict_output )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {'''output_hidden_states''': True} )
SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {'''output_hidden_states''': True} )
@require_torch
class MaskFormerSwinBackboneTest( unittest.TestCase , BackboneTesterMixin ):
UpperCamelCase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase = MaskFormerSwinConfig
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self )
def __lowerCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['''pixel_values'''].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
self.assertIsNotNone(outputs.attentions )
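
# Editor's sketch (hypothetical, not part of the original test file): the
# backbone contract checked above can also be exercised directly, assuming a
# transformers build that ships MaskFormerSwin and an available torch install.
if __name__ == "__main__":
    _config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
    _backbone = MaskFormerSwinBackbone(_config).eval()
    with torch.no_grad():
        _outputs = _backbone(torch.randn(1, 3, 224, 224))
    print([tuple(f.shape) for f in _outputs.feature_maps])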
| 700 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    '''Score `item` by how many characters match `main_target` position-wise.'''
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
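
# Editor's sketch (hypothetical strings, not from the original): evaluate()
# counts positional matches, so "abc" scored against target "abd" gives 2.0.
assert evaluate("abc", "abd") == ("abc", 2.0)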
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    '''Slice both parents at a random point and swap the tails.'''
    random_slice = random.randint(0 , len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    '''Flip one random gene with probability MUTATION_PROBABILITY.'''
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select(parent_1: tuple[str, float] , population_score: list[tuple[str, float]] , genes: list[str] , ) -> list[str]:
    '''Crossover `parent_1` with random fit parents and mutate the children.'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 1_00 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]

        child_1 , child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
def basic(target: str , genes: list[str] , debug: bool = True ) -> tuple[int, int, str]:
    '''simple docstring'''
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg )

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''''''.join([random.choice(genes ) for i in range(len(target ) )] ) )

    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}" )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation , population , target = basic(target_str, genes_list)
    print(
        f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
| 18 | 0 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    '''Return True if the given number is prime.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    '''Return n together with all of its left- and right-truncations.'''
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums


def validate(n: int) -> bool:
    '''Quick pre-check: the leading and trailing three digits must be prime.'''
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    '''Return the first `count` primes that stay prime under all truncations.'''
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes


def solution() -> int:
    '''Project Euler 37: the sum of the only eleven truncatable primes.'''
    return sum(compute_truncated_primes(11 ) )


if __name__ == "__main__":
    print(f'''{sum(compute_truncated_primes(11)) = }''')
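
# Editor's sketch (hand-checkable values, not in the original sample): 3797 is
# the canonical truncatable prime — 3797, 797, 97, 7 and 379, 37, 3 are all prime.
assert is_prime(3797) and all(is_prime(n) for n in list_truncated_nums(3797))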
| 500 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config , args ):
    '''simple docstring'''
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
# New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
# Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Union[str, Any] = config["lr"]
_lowerCAmelCase : List[Any] = int(config["num_epochs"] )
_lowerCAmelCase : str = int(config["seed"] )
_lowerCAmelCase : str = int(config["batch_size"] )
_lowerCAmelCase : int = evaluate.load("glue" , "mrpc" )
set_seed(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Any = get_dataloaders(_lowerCamelCase , _lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
# Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
# Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
model.train()
        for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=_lowerCamelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
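
# Editor's sketch: typical launches for this example; the commands follow the
# standard accelerate workflow and the file name is a placeholder assumption:
#
#   accelerate config        # answer the interactive setup questions once
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4
#
# A plain `python gradient_accumulation.py` also works for single-process runs.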
| 500 | 1 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method ):
    """
    Decorator for model methods that may carry an accelerate hook: when a
    `_hf_hook` with a `pre_forward` is attached (accelerate >= 0.17.0), it is
    invoked before the wrapped method runs.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("0.17.0" ):
        return method

    def wrapper(self ,*args ,**kwargs ):
        if hasattr(self ,"_hf_hook" ) and hasattr(self._hf_hook ,"pre_forward" ):
            self._hf_hook.pre_forward(self )
        return method(self ,*args ,**kwargs )

    return wrapper
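
# Editor's sketch (hypothetical class, not from the original module): the
# decorator wraps methods of modules that may carry an accelerate `_hf_hook`,
# e.g. for CPU-offloaded weights that must be moved before the forward pass:
#
#   class MyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return self.encoder(x)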
| 701 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
        self.graph = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class Graph:
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
        self.graph = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
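
# Editor's sketch (hypothetical edges; method names assume the upstream
# add_pair/dfs/bfs API that the bodies above implement):
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   g.dfs(0)   # -> [0, 1, 2]
#   g.bfs(0)   # -> [0, 1, 2]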
| 656 | 0 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph , s , t , parent ):
    # Return True if there is an augmenting path from s to t, filling `parent` along the way.
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
UpperCamelCase_: int = [-1] * (len(UpperCAmelCase__ ))
UpperCamelCase_: List[str] = 0
UpperCamelCase_: Any = []
    UpperCamelCase_: Union[str, Any] = [i[:] for i in graph]  # Copy the original capacities so saturated edges can be identified later.
while bfs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase_: Tuple = float('Inf' )
UpperCamelCase_: Optional[int] = sink
while s != source:
            # Find the minimum residual capacity along the augmenting path
UpperCamelCase_: int = min(UpperCAmelCase__ , graph[parent[s]][s] )
UpperCamelCase_: Tuple = parent[s]
max_flow += path_flow
UpperCamelCase_: Dict = sink
while v != source:
UpperCamelCase_: Any = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCamelCase_: Optional[int] = parent[v]
for i in range(len(UpperCAmelCase__ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
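# Editor's note: a de-obfuscated sketch of the BFS helper above. It searches
# the residual graph for an augmenting path from source to sink and records
# predecessors in `parent`; the names are illustrative only.
def bfs_sketch(graph, source, sink, parent):
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for v, residual_capacity in enumerate(graph[u]):
            if not visited[v] and residual_capacity > 0:
                queue.append(v)
                visited[v] = True
                parent[v] = u
    return visited[sink]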
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 57 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase ( enum.Enum ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 2
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self :Any , *_lowercase :Optional[Any] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowercase__ = None
if self.model.config.prefix is not None:
lowercase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowercase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowercase__ , lowercase__ , lowercase__ = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
lowercase__ = {**self._preprocess_params, **preprocess_params}
lowercase__ = {**self._forward_params, **forward_params}
def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCAmelCase ( self :str , _lowercase :int , **_lowercase :str ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase ( self :Any , _lowercase :Tuple , _lowercase :str=ReturnType.FULL_TEXT , _lowercase :Dict=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
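# Editor's note: hedged usage sketch of the pipeline defined above. "gpt2" is
# only an illustrative checkpoint, and the weights are downloaded on first use.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
result = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
print(result[0]["generated_text"])  # only the newly generated continuation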
| 655 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : List[str] ="perceiver"
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2_56 , lowerCAmelCase : str=12_80 , lowerCAmelCase : Union[str, Any]=7_68 , lowerCAmelCase : int=1 , lowerCAmelCase : Optional[int]=26 , lowerCAmelCase : Optional[int]=8 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : str=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Union[str, Any]="kv" , lowerCAmelCase : int=1 , lowerCAmelCase : str=1 , lowerCAmelCase : int="gelu" , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : str=0.02 , lowerCAmelCase : List[Any]=1e-12 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Optional[int]=2_62 , lowerCAmelCase : Optional[int]=20_48 , lowerCAmelCase : Tuple=56 , lowerCAmelCase : Any=[3_68, 4_96] , lowerCAmelCase : Any=16 , lowerCAmelCase : Any=19_20 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Any=[1, 16, 2_24, 2_24] , **lowerCAmelCase : Dict , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase )
__lowerCAmelCase : Any = num_latents
__lowerCAmelCase : Tuple = d_latents
__lowerCAmelCase : Optional[Any] = d_model
__lowerCAmelCase : int = num_blocks
__lowerCAmelCase : Optional[int] = num_self_attends_per_block
__lowerCAmelCase : Dict = num_self_attention_heads
__lowerCAmelCase : Optional[Any] = num_cross_attention_heads
__lowerCAmelCase : Tuple = qk_channels
__lowerCAmelCase : List[str] = v_channels
__lowerCAmelCase : int = cross_attention_shape_for_attention
__lowerCAmelCase : Tuple = self_attention_widening_factor
__lowerCAmelCase : List[Any] = cross_attention_widening_factor
__lowerCAmelCase : Optional[int] = hidden_act
__lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = initializer_range
__lowerCAmelCase : Dict = layer_norm_eps
__lowerCAmelCase : List[str] = use_query_residual
# masked language modeling attributes
__lowerCAmelCase : int = vocab_size
__lowerCAmelCase : Optional[Any] = max_position_embeddings
# image classification attributes
__lowerCAmelCase : Any = image_size
# flow attributes
__lowerCAmelCase : List[Any] = train_size
# multimodal autoencoding attributes
__lowerCAmelCase : Optional[int] = num_frames
__lowerCAmelCase : int = audio_samples_per_frame
__lowerCAmelCase : List[Any] = samples_per_patch
__lowerCAmelCase : Tuple = output_shape
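# Editor's note: a minimal instantiation sketch for the configuration class
# above (upstream name PerceiverConfig); the keyword names follow the
# __init__ signature shown, and the values are arbitrary small ones.
from transformers import PerceiverConfig

config = PerceiverConfig(num_latents=64, d_latents=256, num_self_attends_per_block=4)
print(config.num_latents, config.d_latents)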
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowerCAmelCase : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCAmelCase : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1e-4
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 40 , lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowerCAmelCase : Dict = compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCAmelCase : Any = preprocessor.num_special_tokens_to_add(lowerCAmelCase )
__lowerCAmelCase : str = compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowerCAmelCase : str = [""" """.join(["""a"""] ) * seq_length] * batch_size
__lowerCAmelCase : List[str] = dict(preprocessor(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
__lowerCAmelCase : List[str] = inputs.pop("""input_ids""" )
return inputs
elif isinstance(lowerCAmelCase , lowerCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowerCAmelCase : Union[str, Any] = compute_effective_axis_dimension(lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
__lowerCAmelCase : Any = self._generate_dummy_images(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[str] = dict(preprocessor(images=lowerCAmelCase , return_tensors=lowerCAmelCase ) )
__lowerCAmelCase : Any = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 218 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=a_ )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : str =field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCamelCase : ClassVar[Features] =Features({"audio": Audio()} )
lowerCamelCase : ClassVar[Features] =Features({"labels": ClassLabel} )
lowerCamelCase : str ="audio"
lowerCamelCase : str ="labels"
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowerCAmelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__lowerCAmelCase : Optional[int] = copy.deepcopy(self )
__lowerCAmelCase : Tuple = self.label_schema.copy()
__lowerCAmelCase : Optional[int] = features[self.label_column]
__lowerCAmelCase : int = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
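# Editor's note: hedged usage sketch of the task template above (upstream name
# AudioClassification), assuming a `datasets` version that still ships task
# templates; they were deprecated in later releases.
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
task = AudioClassification(audio_column="audio", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema)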
| 218 | 1 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
    # mock get_model, which would otherwise download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a real COMET model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
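# Editor's note: a self-contained sketch of the patching pattern used above;
# an expensive model call is swapped for a stub inside a context manager,
# as the register_intensive_calls_patcher hooks do. Names are illustrative.
from contextlib import contextmanager
from unittest.mock import patch

class Scorer:
    def score(self, texts):
        raise RuntimeError("pretend this downloads a large model")

@contextmanager
def cheap_scorer():
    with patch.object(Scorer, "score", return_value=[0.5]):
        yield

with cheap_scorer():
    print(Scorer().score(["hello"]))  # [0.5], no model download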
| 48 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def _a ( lowercase__ : np.ndarray ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = np.nan
for i in range(lowercase__ ):
SCREAMING_SNAKE_CASE__ : int = features[:, labels == i]
SCREAMING_SNAKE_CASE__ : int = data.mean(1 )
        # Center the data of class i
SCREAMING_SNAKE_CASE__ : Optional[Any] = data - column_reshape(lowercase__ )
if i > 0:
            # If covariance_sum is not np.nan (i.e. not the first loop)
covariance_sum += np.dot(lowercase__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
SCREAMING_SNAKE_CASE__ : Any = np.dot(lowercase__ , centered_data.T )
return covariance_sum / features.shape[1]
def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = features.mean(1 )
SCREAMING_SNAKE_CASE__ : List[str] = np.nan
for i in range(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Tuple = features[:, labels == i]
SCREAMING_SNAKE_CASE__ : int = data.shape[1]
SCREAMING_SNAKE_CASE__ : List[Any] = data.mean(1 )
if i > 0:
            # If covariance_sum is not np.nan (i.e. not the first loop)
covariance_sum += device_data * np.dot(
column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
SCREAMING_SNAKE_CASE__ : str = device_data * np.dot(
column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , )
return covariance_sum / features.shape[1]
def _a ( lowercase__ : np.ndarray , lowercase__ : int ):
'''simple docstring'''
if features.any():
SCREAMING_SNAKE_CASE__ : Any = features.mean(1 )
# Center the dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = features - np.reshape(lowercase__ , (data_mean.size, 1) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(lowercase__ , centered_data.T ) / features.shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = np.linalg.eigh(lowercase__ )
# Take all the columns in the reverse order (-1), and then takes only the first
SCREAMING_SNAKE_CASE__ : List[Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.dot(filtered_eigenvectors.T , lowercase__ )
logging.info('Principal Component Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ )
logging.error('Dataset empty' )
raise AssertionError
def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = eigh(
covariance_between_classes(lowercase__ , lowercase__ , lowercase__ ) , covariance_within_classes(lowercase__ , lowercase__ , lowercase__ ) , )
SCREAMING_SNAKE_CASE__ : Tuple = eigenvectors[:, ::-1][:, :dimensions]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = np.linalg.svd(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = svd_matrix[:, 0:dimensions]
SCREAMING_SNAKE_CASE__ : int = np.dot(filtered_svd_matrix.T , lowercase__ )
logging.info('Linear Discriminant Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ )
logging.error('Dataset empty' )
raise AssertionError
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
SCREAMING_SNAKE_CASE__ : Tuple = np.array([0, 0, 0, 1, 1] )
SCREAMING_SNAKE_CASE__ : str = 2
SCREAMING_SNAKE_CASE__ : Dict = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(lowercase__ ) as error_info:
SCREAMING_SNAKE_CASE__ : Optional[int] = linear_discriminant_analysis(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if isinstance(lowercase__ , np.ndarray ):
raise AssertionError(
'Did not raise AssertionError for dimensions > classes' )
assert error_info.type is AssertionError
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(lowercase__ ) as error_info:
SCREAMING_SNAKE_CASE__ : int = principal_component_analysis(lowercase__ , lowercase__ )
if not np.allclose(lowercase__ , lowercase__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
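# Editor's note: a de-obfuscated sketch of the PCA routine above: center the
# data, eigendecompose the covariance, keep the leading components, project.
# Names are illustrative; the logic mirrors the function defined earlier.
import numpy as np

def pca_sketch(features: np.ndarray, dimensions: int) -> np.ndarray:
    centered = features - features.mean(axis=1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance)
    components = eigenvectors[:, ::-1][:, :dimensions]  # largest eigenvalues first
    return components.T @ features

demo = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
print(pca_sketch(demo, 2).shape)  # (2, 3)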
| 85 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def snake_case ( __A : Any ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def snake_case ( self : Any ):
"""simple docstring"""
raise NotImplementedError()
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : Any = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[int] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__magic_name__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
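# Editor's note: with the _LazyModule indirection above, importing the package
# stays cheap; the torch-backed submodule only loads on first attribute access.
# A hedged example (the configuration class itself needs no torch):
from transformers import InformerConfig

config = InformerConfig(prediction_length=24)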
| 602 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowerCamelCase__ = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def A(__a: Optional[Any] , __a: int , __a: Any , __a: Dict , __a: Optional[int]=False , __a: List[str]=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowerCAmelCase_ = cached_file(__a , __a , force_download=not use_cached_models )
lowerCAmelCase_ = config_class.from_json_file(__a )
lowerCAmelCase_ = True
lowerCAmelCase_ = True
print(F"Building TensorFlow model from configuration: {config}" )
lowerCAmelCase_ = model_class(__a )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowerCAmelCase_ = cached_file(
__a , __a , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowerCAmelCase_ = load_pytorch_checkpoint_in_tfa_model(__a , __a )
if compare_with_pt_model:
lowerCAmelCase_ = tf_model(tf_model.dummy_inputs , training=__a ) # build the network
lowerCAmelCase_ = torch.load(__a , map_location="cpu" )
lowerCAmelCase_ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__a , config=__a , state_dict=__a )
with torch.no_grad():
lowerCAmelCase_ = pt_model(**pt_model.dummy_inputs )
lowerCAmelCase_ = pto[0].numpy()
lowerCAmelCase_ = tfo[0].numpy()
lowerCAmelCase_ = np.amax(np.abs(np_pt - np_tf ) )
print(F"Max absolute difference between models outputs {diff}" )
assert diff <= 2E-2, F"Error, model absolute difference is >2e-2: {diff}"
    # Save the TensorFlow model
print(F"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(__a , save_format="h5" )
def A(__a: Optional[int] , __a: Tuple , __a: int=None , __a: List[Any]=None , __a: str=False , __a: Any=False , __a: Tuple=False , __a: Optional[int]=False , ):
if args_model_type is None:
lowerCAmelCase_ = list(MODEL_CLASSES.keys() )
else:
lowerCAmelCase_ = [args_model_type]
for j, model_type in enumerate(__a , start=1 ):
print("=" * 100 )
print(F" Converting model type {j}/{len(__a )}: {model_type}" )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowerCAmelCase_ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowerCAmelCase_ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__a , __a ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
lowerCAmelCase_ = model_shortcut_name
elif only_convert_finetuned_models:
            print(F" Skipping non-finetuned checkpoint {model_shortcut_name}" )
continue
print(
F" Converting checkpoint {i}/{len(__a )}: {model_shortcut_name} - model_type {model_type}" )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowerCAmelCase_ = cached_file(__a , __a , force_download=not use_cached_models )
else:
lowerCAmelCase_ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowerCAmelCase_ = cached_file(__a , __a , force_download=not use_cached_models )
else:
lowerCAmelCase_ = model_shortcut_name
if os.path.isfile(__a ):
lowerCAmelCase_ = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=__a , pytorch_checkpoint_path=__a , config_file=__a , tf_dump_path=os.path.join(__a , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=__a , )
if remove_cached_files:
os.remove(__a )
os.remove(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
lowerCamelCase__ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
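# Editor's note: illustrative command-line use of the script above (upstream
# file name convert_pytorch_checkpoint_to_tf2.py); every path below is a
# placeholder, not a real file.
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./pytorch_model.bin \
#       --config_file ./config.json \
#       --tf_dump_path ./tf_model.h5 \
#       --compare_with_pt_model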
| 122 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __magic_name__ (unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , _a=False , ) -> Any:
lowerCAmelCase_ = size if size is not None else {"height": 20, "width": 20}
lowerCAmelCase_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_center_crop
lowerCAmelCase_ = crop_size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
lowerCAmelCase_ = do_reduce_labels
def __a ( self ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A():
lowerCAmelCase_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowerCAmelCase_ = Image.open(dataset[0]["file"] )
lowerCAmelCase_ = Image.open(dataset[1]["file"] )
return image, map
def A():
lowerCAmelCase_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowerCAmelCase_ = Image.open(ds[0]["file"] )
lowerCAmelCase_ = Image.open(ds[1]["file"] )
lowerCAmelCase_ = Image.open(ds[2]["file"] )
lowerCAmelCase_ = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = BeitImageProcessor if is_vision_available() else None
def __a ( self ) -> int:
lowerCAmelCase_ = BeitImageProcessingTester(self )
@property
def __a ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , "do_resize" ) )
self.assertTrue(hasattr(_a , "size" ) )
self.assertTrue(hasattr(_a , "do_center_crop" ) )
self.assertTrue(hasattr(_a , "center_crop" ) )
self.assertTrue(hasattr(_a , "do_normalize" ) )
self.assertTrue(hasattr(_a , "image_mean" ) )
self.assertTrue(hasattr(_a , "image_std" ) )
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels , _a )
lowerCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_a )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels , _a )
def __a ( self ) -> str:
pass
def __a ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __a ( self ) -> Dict:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __a ( self ) -> Tuple:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __a ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
lowerCAmelCase_ = []
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
lowerCAmelCase_ = image_processing(_a , _a , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
lowerCAmelCase_ , lowerCAmelCase_ = prepare_semantic_single_inputs()
lowerCAmelCase_ = image_processing(_a , _a , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
lowerCAmelCase_ , lowerCAmelCase_ = prepare_semantic_batch_inputs()
lowerCAmelCase_ = image_processing(_a , _a , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def __a ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowerCAmelCase_ , lowerCAmelCase_ = prepare_semantic_single_inputs()
lowerCAmelCase_ = image_processing(_a , _a , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
lowerCAmelCase_ = True
lowerCAmelCase_ = image_processing(_a , _a , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
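# Editor's note: a minimal sketch of what the tests above exercise, assuming
# torch is installed (upstream class name BeitImageProcessor).
import numpy as np
from transformers import BeitImageProcessor

processor = BeitImageProcessor(do_reduce_labels=True)
image = np.random.randint(0, 256, (3, 32, 32), dtype=np.uint8)
segmentation_map = np.zeros((32, 32), dtype=np.int64)
encoding = processor(images=image, segmentation_maps=segmentation_map, return_tensors="pt")
print(encoding["pixel_values"].shape, encoding["labels"].shape)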
| 122 | 1 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file", default=None, help="The accelerate config file to use for the default values in the launching script.", )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
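
# Usage sketch (an assumption, not part of the original file): how this command plugs
# into an argparse-based CLI entry point. `transformers-cli env` resolves to
# EnvironmentCommand via the `func` default registered above.
#
# parser = ArgumentParser("transformers-cli")
# commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
# EnvironmentCommand.register_subcommand(commands_parser)
# args = parser.parse_args(["env"])
# args.func(args).run()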
| 100 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
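
# Minimal usage sketch (an assumption, not part of the original file): with the lazy
# module installed in sys.modules, the heavy tokenizer module is only imported on first
# attribute access, keeping `import transformers.models.bertweet` cheap.
#
# from transformers.models.bertweet import BertweetTokenizer  # triggers the real import here
# tokenizer = BertweetTokenizer.from_pretrained("vinai/bertweet-base")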
| 100 | 1 |
import os
import numpy
import onnx
# NOTE: the original helper names were mangled in the source. The private names below
# are recovered from the surviving internal call sites (e.g. `_graph_replace_input_with`);
# `remove_dup_initializers` as the public entry-point name is an assumption.
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(fp):
    model_file_folder = os.path.dirname(fp)
    model_file_name = os.path.basename(fp)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)

    return new_model
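
# Usage sketch (hypothetical path, not part of the original file): deduplicates shared
# initializer tensors in an exported model and writes "optimized_<name>" next to it.
#
# optimized_path = remove_dup_initializers("exported/model.onnx")
# print("deduplicated model written to", optimized_path)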
| 550 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 550 | 1 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image; positive `level` increases contrast."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 697 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
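
# Note (added for clarity; an assumption based on the allocations further below): the
# engine is expected to expose 3 input bindings (input_ids, attention_mask,
# token_type_ids) at indices 0-2 and 2 output bindings (start/end logits) at indices
# 3-4, which is why h_output0/h_output1 are sized from get_binding_shape(3) and (4).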
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    # Helper: number of bytes needed for a given binding.
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None

    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)

    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 697 | 1 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum over all contiguous subarrays of `arr`."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # extend the running subarray, or restart it at `num` (at 0 if empty subarrays count)
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
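
# Worked example (not in the original file): for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best
# contiguous subarray is [4, -1, 2, 1], so the function returns 6.
#
# assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6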
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 35 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
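
# A small runnable demo (not in the original file): 3-color a 5-vertex graph given as an
# adjacency matrix. Vertices 0, 1 and 4 form a triangle, so 3 colors are required; the
# backtracking search above finds [0, 1, 0, 1, 2].
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 0, 1],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [1, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))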
| 304 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 705 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 220 | 0 |
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):  # original class name was mangled; "CustomPipeline" is a stand-in
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
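
# Usage sketch (an assumption, not part of the original file): any small UNet2DModel and
# scheduler can be registered; __call__ takes no inputs and returns a tensor of ones with
# the UNet's sample shape.
#
# from diffusers import UNet2DModel, DDPMScheduler
# pipe = CustomPipeline(unet=UNet2DModel(), scheduler=DDPMScheduler())
# out = pipe()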
| 199 |
def heaps(arr: list) -> list:
    """Iterative Heap's algorithm: return all permutations of a list as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
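
# Quick check (not in the original file): Heap's algorithm yields all n! permutations.
#
# assert len(heaps([1, 2, 3])) == 6
# assert set(heaps([1, 2, 3])) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}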
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 199 | 1 |
from ..utils import DummyObject, requires_backends
# NOTE: the original class names were mangled in the source. The names below are
# reconstructed from the diffusers `dummy_flax_and_transformers_objects` module this
# file mirrors, and should be treated as an assumption.
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
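
# A minimal sketch (an assumption, not part of the original file) of the dummy-object
# mechanism: without flax/transformers installed, instantiating any class above raises
# an ImportError from requires_backends explaining which backends are missing.
#
# try:
#     FlaxStableDiffusionPipeline()
# except ImportError as e:
#     print(e)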
| 713 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group", ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
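
# Usage sketch (an assumption, not part of the original file): round-trip a random image
# through the default 3-channel VQ autoencoder.
#
# model = VQModel()
# sample = torch.randn(1, 3, 32, 32)
# latents = model.encode(sample).latents
# reconstruction = model.decode(latents).sample  # same spatial shape as `sample`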
| 600 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a gray image computed from an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image computed from a gray image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by `kernel`."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image (the exact target slice was mangled in the source;
    # this offset matches the reference implementation this snippet follows)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 90 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):  # original class name was mangled; this is a stand-in
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 542 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 7 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 45 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """Image processor that resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
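# Minimal usage sketch (illustrative, not part of the original file; the image
# path is a placeholder):
#   from PIL import Image
#   processor = LevitImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])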
import os
def solution() -> int:
    """Sum each name's alphabetical score times its 1-based position in the
    sorted list of names (Project Euler problem 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
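# Worked example from the problem statement (illustrative): COLIN has letter
# values 3 + 15 + 12 + 9 + 14 = 53; as the 938th name alphabetically it
# contributes 938 * 53 = 49714 to the total.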
if __name__ == "__main__":
print(solution())
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]:
_lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
_lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
_lowercase : Dict = 0
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
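# Example invocation (illustrative; the script filename and output path are
# assumptions, the model id comes from model_dict above):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --device cpu --output_file_path bart.onnx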
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViViT model."""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
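# Minimal usage sketch (illustrative, not part of the original file):
#   config = VivitConfig(num_frames=16)
#   config.hidden_size  # -> 768 (default)
#   config.num_frames   # -> 16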
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand  # module name reconstructed; the solution lives alongside this test
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """Generate a random poker hand, an opponent hand and the expected result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) evaluates to 0, 1 or 2, which indexes
    # "Loss", "Tie" or "Win" because SORTED_HANDS is ordered weakest-first.
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted() -> None:
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight() -> None:
    # A five-high straight (A 2 3 4 5) must sort below a six-high straight.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight() -> None:
    # Repeated calls must keep returning the same result and card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project() -> None:
    # Problem 54 of Project Euler: count Player 1 wins in poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """Construct a CANINE tokenizer, i.e. a character splitter: ids are Unicode codepoints."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CanineTokenizer has no vocab file
        return ()
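# Minimal usage sketch (illustrative, not part of the original file): CANINE
# tokenizes at the character level, so ids are Unicode codepoints plus the
# special codepoints defined above.
#   tokenizer = CanineTokenizer()
#   tokenizer("hi")["input_ids"]  # -> [57344, 104, 105, 57345], i.e. [CLS, 'h', 'i', SEP]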
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: a single linear layer on top of an embedding."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state: (batch_size, embed_size) -> logits: (batch_size, class_size)
        logits = self.mlp(hidden_state)
        return logits
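# Minimal usage sketch (illustrative, not part of the original file):
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   head(torch.randn(2, 768)).shape  # -> torch.Size([2, 5])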
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with DDIM."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
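# Minimal usage sketch (illustrative, not part of the original file; the
# checkpoint id is an assumption):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]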
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve the 2x2 system a1*x + b1*y = c1, a2*x + b2*y = c2 via Cramer's rule."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
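# Worked example (illustrative): x + 2y = 3 and 2x + y = 3 intersect at (1, 1).
#   cramers_rule_2x2([1, 2, 3], [2, 1, 3])  # -> (1.0, 1.0)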
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __UpperCAmelCase ( self : List[str]) -> Any:
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
def __UpperCAmelCase ( self : Tuple) -> str:
lowercase_ = "Hello World!"
lowercase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def __UpperCAmelCase ( self : Any) -> Dict:
lowercase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowercase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def __UpperCAmelCase ( self : Any) -> Tuple:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowercase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
lowercase_ = " ".join(__lowerCAmelCase)
lowercase_ = self.big_tokenizer.encode_plus(__lowerCAmelCase , return_tensors="pt")
lowercase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt")
lowercase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowercase_ = encoded_sequence["input_ids"].shape
lowercase_ = ReformerModel(__lowerCAmelCase)
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowerCAmelCase)
model(**__lowerCAmelCase)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model from the json configuration
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
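# Example invocation (illustrative; the script name and all paths are
# placeholders):
#   python convert_funnel_checkpoint.py --tf_checkpoint_path model.ckpt \
#       --config_file config.json --pytorch_dump_path pytorch_model.bin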
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation pipeline: predicts binary masks for an image."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
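# Minimal usage sketch (illustrative, not part of the original file; the
# checkpoint id and image path are assumptions):
#   from transformers import pipeline
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
#   outputs = generator("image.png")
#   len(outputs["masks"])  # number of segmented masks found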
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that dynamically pads the received inputs and labels.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
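# Illustration (added, not part of the original script): label sequences
# [[5, 6], [7]] are first padded to [[5, 6], [7, pad]], then the padded
# position is masked to -100 -> [[5, 6], [7, -100]], so the CTC loss skips it.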
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def UpperCAmelCase ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCamelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCamelCase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , A__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
    eval_dataset = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
    # Create and save tokenizer
    chars_to_ignore_regex = f'''[{"".join(data_args.chars_to_ignore )}]'''
    def remove_special_characters(batch ):
        batch['text'] = re.sub(chars_to_ignore_regex , '' , batch['sentence'] ).lower() + ' '
        return batch
    train_dataset = train_dataset.map(remove_special_characters , remove_columns=['sentence'] )
    eval_dataset = eval_dataset.map(remove_special_characters , remove_columns=['sentence'] )
    def extract_all_chars(batch ):
        all_text = ' '.join(batch['text'] )
        vocab = list(set(all_text ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    vocab_train = train_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=train_dataset.column_names , )
    vocab_test = eval_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=eval_dataset.column_names , )
    vocab_list = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
    vocab_dict = {v: k for k, v in enumerate(vocab_list )}
    vocab_dict['|'] = vocab_dict[' ']
    del vocab_dict[" "]
    vocab_dict['[UNK]'] = len(vocab_dict )
    vocab_dict['[PAD]'] = len(vocab_dict )
    with open('vocab.json' , 'w' ) as vocab_file:
        json.dump(vocab_dict , vocab_file )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = WavaVecaCTCTokenizer(
        'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
    processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    model = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
        train_dataset = train_dataset.select(range(max_train_samples ) )
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples ) )
    resampler = torchaudio.transforms.Resample(48000 , 16000 )
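    # Common Voice clips ship at 48 kHz while Wav2Vec2 checkpoints expect 16 kHz
    # input, so every waveform is resampled before feature extraction.
    # Illustrative sketch (hypothetical tensor, not part of the script):
    #   dummy = torch.zeros(1, 48000)   # one second of audio at 48 kHz
    #   resampler(dummy).shape          # -> torch.Size([1, 16000])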
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch ):
        speech_array , sampling_rate = torchaudio.load(batch['path'] )
        batch['speech'] = resampler(speech_array ).squeeze().numpy()
        batch['sampling_rate'] = 16000
        batch['target_text'] = batch['text']
        return batch
    train_dataset = train_dataset.map(
        speech_file_to_array_fn , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['sampling_rate'] ) ) == 1
        ), f'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
        processed_batch = processor(
            audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
        batch.update(processed_batch )
        return batch
    train_dataset = train_dataset.map(
        prepare_dataset , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        prepare_dataset , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
# Metric
    wer_metric = datasets.load_metric('wer' )
    def compute_metrics(pred ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits , axis=-1 )
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
        wer = wer_metric.compute(predictions=pred_str , references=label_str )
        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor , padding=True )
    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model , data_collator=data_collator , args=training_args , compute_metrics=compute_metrics , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_val_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
return results
if __name__ == "__main__":
main()
| 263 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    '''simple docstring'''
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
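        # Worked example of the padding arithmetic above, using the class
        # defaults: with seq_length = 7 and attention_window = 4 the padded
        # length is 7 + (4 - 7 % 4) % 4 = 8, the next multiple of the window.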
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids )[:, :-1], tf.ones_like(input_ids )[:, -1:]] , axis=-1 , )
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['global_attention_mask'] = tf.zeros_like(inputs_dict['attention_mask'] )
        num_global_attn_indices = 2
        inputs_dict['global_attention_mask'] = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs ):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(global_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
    def test_saved_model_creation( self ):
        pass
    def test_generate_with_headmasking( self ):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
a_ : Tuple = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    def test_inference_no_head( self ):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 )
    def test_inference_with_head( self ):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3 )
| 263 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
    '''simple docstring'''
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
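# Illustrative mapping produced by renew_resnet_paths (the key below is an
# assumed example, for demonstration only):
#   renew_resnet_paths(['input_blocks.1.0.in_layers.0.weight'])
#   -> [{'old': 'input_blocks.1.0.in_layers.0.weight',
#        'new': 'input_blocks.1.0.norm1.weight'}]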
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    '''simple docstring'''
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['''num_head_channels'''] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['''query''']] = query.reshape(target_shape )
            checkpoint[path_map['''key''']] = key.reshape(target_shape )
            checkpoint[path_map['''value''']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['''new''']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
        new_path = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
        new_path = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['''old'''] , replacement['''new'''] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['''old''']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['''old''']]
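# Descriptive note on the qkv split above (added comment, not from the original
# script): the fused qkv tensor has 3 * channels rows; after reshaping to
# (num_heads, 3 * channels // num_heads, ...), each head's rows are cut into
# equal query/key/value thirds before being written to the new checkpoint. The
# "proj_attn.weight" branch slices a (out, in, 1) conv1d kernel down to
# (out, in) so it can be loaded into a Linear layer.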
def convert_ldm_checkpoint(checkpoint , config ):
    '''simple docstring'''
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = (i - 1) % (config['''num_res_blocks'''] + 1)
        resnets = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        attentions = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.weight"""] = checkpoint[
                f"""input_blocks.{i}.0.op.weight"""
            ]
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.bias"""] = checkpoint[
                f"""input_blocks.{i}.0.op.bias"""
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        resnet_op = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                '''old''': f"""input_blocks.{i}.1""",
                '''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            to_split = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        '''middle_block.1.qkv.bias''': {
            '''key''': '''mid_block.attentions.0.key.bias''',
            '''query''': '''mid_block.attentions.0.query.bias''',
            '''value''': '''mid_block.attentions.0.value.bias''',
        },
        '''middle_block.1.qkv.weight''': {
            '''key''': '''mid_block.attentions.0.key.weight''',
            '''query''': '''mid_block.attentions.0.query.weight''',
            '''value''': '''mid_block.attentions.0.value.weight''',
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = i % (config['''num_res_blocks'''] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split('''.''' )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
            attentions = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.weight"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.weight"""
                ]
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.bias"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.bias"""
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    '''old''': f"""output_blocks.{i}.1""",
                    '''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
                }
                to_split = {
                    f"""output_blocks.{i}.1.qkv.bias""": {
                        '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                        '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                        '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                    },
                    f"""output_blocks.{i}.1.qkv.weight""": {
                        '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                        '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                        '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = '''.'''.join(['''output_blocks''', str(i ), path['''old''']] )
                new_path = '''.'''.join(['''up_blocks''', str(block_id ), '''resnets''', str(layer_in_block_id ), path['''new''']] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 35 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    '''simple docstring'''
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ): # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ): # printing stars
            print('''* ''' , end='''''' )
        print()
def reverse_floyd(n):
    '''simple docstring'''
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
            print(''' ''' , end='''''' )
def pretty_print(n):
    '''simple docstring'''
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
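# Illustrative output of pretty_print(3), upper half from floyd and lower half
# from reverse_floyd (trailing spaces omitted):
#   *
#  * *
# * * *
# * * *
#  * *
#   *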
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...') | 201 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ChineseCLIPImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : int ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCamelCase__ , )
__UpperCamelCase =kwargs.pop('''feature_extractor''' )
__UpperCamelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =self.image_processor
def __call__( self : List[str] , UpperCamelCase__ : str=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , **UpperCamelCase__ : Dict ) -> Dict:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__UpperCamelCase =self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
__UpperCamelCase =self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
__UpperCamelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def UpperCAmelCase_ ( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : int , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[str] ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCAmelCase_ ( self : Any ) -> str:
'''simple docstring'''
__UpperCamelCase =self.tokenizer.model_input_names
__UpperCamelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase_ ( self : Any ) -> int:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCamelCase__ , )
return self.image_processor_class
| 296 | """simple docstring"""
import sys
__lowercase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution (n : str = __lowercase ):
    """simple docstring"""
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 1_2 ):
        product = 1
        for j in range(1_3 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
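# Sanity check from the original problem statement (Project Euler #8): with a
# window of four adjacent digits, the greatest product in this number is
# 9 * 9 * 8 * 9 = 5832; the function above uses the thirteen-digit window the
# problem asks for.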
if __name__ == "__main__":
print(f'''{solution() = }''')
| 296 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 79 |
'''simple docstring'''
def hubble_parameter (hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    """simple docstring"""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be positive''' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('''Relative densities cannot be greater than one''' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__lowerCamelCase : Optional[Any] = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 404 | 0 |
'''simple docstring'''
import os
import sys
import unittest
lowercase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase__ = os.path.join(git_repo_path, "src", "transformers")
lowercase__ = "\n{0} = None\n"
lowercase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
lowercase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class CheckDummiesTester( unittest.TestCase ):
    def test_find_backend( self ):
        no_backend = find_backend("    _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
        self.assertIsNone(no_backend )
        simple_backend = find_backend("    if not is_tokenizers_available():" )
        self.assertEqual(simple_backend , "tokenizers" )
        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():" )
        self.assertEqual(backend_with_underscore , "tensorflow_text" )
        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):" )
        self.assertEqual(double_backend , "sentencepiece_and_tokenizers" )
        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
        self.assertEqual(double_backend_with_underscore , "sentencepiece_and_tensorflow_text" )
        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
        self.assertEqual(triple_backend , "sentencepiece_and_tokenizers_and_vision" )
    def test_read_init( self ):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , objects )
        self.assertIn("tensorflow_text" , objects )
        self.assertIn("sentencepiece_and_tokenizers" , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertModel" , objects["tf"] )
        self.assertIn("FlaxBertModel" , objects["flax"] )
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
        self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )
    def test_create_dummy_object( self ):
        dummy_constant = create_dummy_object("CONSTANT" , "'torch'" )
        self.assertEqual(dummy_constant , "\nCONSTANT = None\n" )
        dummy_function = create_dummy_object("function" , "'torch'" )
        self.assertEqual(
            dummy_function , "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass" , "'torch'" )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files( self ):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , expected_dummy_pytorch_file )
| 276 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig( PretrainedConfig ):
    model_type = '''convbert'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 276 | 1 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCamelCase : Union[str, Any] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCamelCase : Optional[Any] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__lowerCamelCase : Optional[int] = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text ):
    """simple docstring"""
    def remove_articles(text ):
        regex = re.compile(R"\b(a|an|the)\b" , re.UNICODE )
        return re.sub(regex , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def compute_exact(a_gold , a_pred ):
    """simple docstring"""
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
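# Intended behavior of the helpers above (toy example, inputs assumed purely
# for illustration): normalize_answer lowercases, strips punctuation and
# articles, and collapses whitespace, so compute_exact("The  Cat!", "cat") == 1.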
def compute_em(predictions , references ):
    """simple docstring"""
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram(sgrams , cgrams , rgramslist , numref ):
    """simple docstring"""
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = len(snake_case__ )
lowercase = ssent.split(" " )
lowercase = csent.split(" " )
lowercase = []
lowercase = []
lowercase = []
lowercase = []
lowercase = []
lowercase = []
lowercase = []
lowercase = []
lowercase = []
lowercase = []
for rsent in rsents:
lowercase = rsent.split(" " )
lowercase = []
lowercase = []
lowercase = []
ragramslist.append(snake_case__ )
for i in range(0 , len(snake_case__ ) - 1 ):
if i < len(snake_case__ ) - 1:
lowercase = ragrams[i] + " " + ragrams[i + 1]
ragrams.append(snake_case__ )
if i < len(snake_case__ ) - 2:
lowercase = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
ragrams.append(snake_case__ )
if i < len(snake_case__ ) - 3:
lowercase = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
ragrams.append(snake_case__ )
ragramslist.append(snake_case__ )
ragramslist.append(snake_case__ )
ragramslist.append(snake_case__ )
for i in range(0 , len(snake_case__ ) - 1 ):
if i < len(snake_case__ ) - 1:
lowercase = sagrams[i] + " " + sagrams[i + 1]
sagrams.append(snake_case__ )
if i < len(snake_case__ ) - 2:
lowercase = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
sagrams.append(snake_case__ )
if i < len(snake_case__ ) - 3:
lowercase = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
sagrams.append(snake_case__ )
for i in range(0 , len(snake_case__ ) - 1 ):
if i < len(snake_case__ ) - 1:
lowercase = cagrams[i] + " " + cagrams[i + 1]
cagrams.append(snake_case__ )
if i < len(snake_case__ ) - 2:
lowercase = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
cagrams.append(snake_case__ )
if i < len(snake_case__ ) - 3:
lowercase = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
cagrams.append(snake_case__ )
((lowercase) , (lowercase) , (lowercase)) = SARIngram(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
((lowercase) , (lowercase) , (lowercase)) = SARIngram(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
((lowercase) , (lowercase) , (lowercase)) = SARIngram(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
((lowercase) , (lowercase) , (lowercase)) = SARIngram(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
lowercase = sum([delascore, delascore, delascore, delascore] ) / 4
lowercase = sum([addascore, addascore, addascore, addascore] ) / 4
lowercase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ):
"""simple docstring"""
if lowercase:
lowercase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
lowercase = sacrebleu.metrics.bleu._get_tokenizer(snake_case__ )()(snake_case__ )
else:
lowercase = sacrebleu.TOKENIZERS[tokenizer]()(snake_case__ )
elif tokenizer == "moses":
lowercase = sacremoses.MosesTokenizer().tokenize(snake_case__ , return_str=snake_case__ , escape=snake_case__ )
elif tokenizer == "penn":
lowercase = sacremoses.MosesTokenizer().penn_tokenize(snake_case__ , return_str=snake_case__ )
else:
lowercase = sentence
if not return_str:
lowercase = normalized_sent.split()
return normalized_sent
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if not (len(snake_case__ ) == len(snake_case__ ) == len(snake_case__ )):
raise ValueError("Sources length must match predictions and references lengths." )
lowercase = 0
for src, pred, refs in zip(snake_case__ , snake_case__ , snake_case__ ):
sari_score += SARIsent(normalize(snake_case__ ) , normalize(snake_case__ ) , [normalize(snake_case__ ) for sent in refs] )
lowercase = sari_score / len(snake_case__ )
return 100 * sari_score
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ):
"""simple docstring"""
lowercase = len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
lowercase = [[refs[i] for refs in references] for i in range(snake_case__ )]
lowercase = sacrebleu.corpus_bleu(
snake_case__ , snake_case__ , smooth_method=snake_case__ , smooth_value=snake_case__ , force=snake_case__ , lowercase=snake_case__ , use_effective_order=snake_case__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def UpperCAmelCase__ (self : List[Any] ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def UpperCAmelCase__ (self : Optional[Any] , A__ : Optional[int] , A__ : str , A__ : Any ) -> Union[str, Any]:
lowercase = {}
result.update({"sari": compute_sari(sources=lowercase_ , predictions=lowercase_ , references=lowercase_ )} )
result.update({"sacrebleu": compute_sacrebleu(predictions=lowercase_ , references=lowercase_ )} )
result.update({"exact": compute_em(predictions=lowercase_ , references=lowercase_ )} )
return result
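

# --- Illustrative sketch (not part of the original file): a cleaned-up version of the
# --- normalize / exact-match helpers above, with readable names. The behavior mirrors
# --- SQuAD-style answer normalization: lowercase, strip punctuation and articles, and
# --- collapse whitespace before comparing strings.
import re
import string


def normalize_answer_sketch(text: str) -> str:
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


def compute_em_sketch(predictions, references) -> float:
    # A prediction scores 1 if it matches any of its references after normalization.
    hits = [
        any(normalize_answer_sketch(pred) == normalize_answer_sketch(ref) for ref in refs)
        for pred, refs in zip(predictions, references)
    ]
    return 100.0 * sum(hits) / len(hits)


assert compute_em_sketch(["The cat sat."], [["cat sat"]]) == 100.0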
| 310 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase__ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase__ : Optional[Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __lowercase :
__UpperCAmelCase = 42
__UpperCAmelCase = 42
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = None
for i in sorted(lowercase_ , reverse=lowercase_):
__snake_case = Node(lowercase_ , self.head)
def __iter__( self) -> Iterator[int]:
__snake_case = self.head
while node:
yield node.data
__snake_case = node.next_node
def __len__( self) -> int:
return sum(1 for _ in self)
def __str__( self) -> str:
return " -> ".join([str(lowercase_) for node in self])
def A ( snake_case__ : SortedLinkedList , snake_case__ : SortedLinkedList ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(snake_case__ ) + list(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : Optional[int] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
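

# --- Illustrative sketch (not part of the original file): the constructor above inserts
# --- elements in descending order at the head, so each new node lands in front of all
# --- larger values and the list stays sorted. merge_lists therefore reduces to:
def merge_sorted_sketch(a, b):
    # Concatenate both iterables, then rebuild in sorted order.
    return sorted(list(a) + list(b))


assert merge_sorted_sketch((3, 9, -11), (4, 6, 2)) == [-11, 2, 3, 4, 6, 9]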
| 313 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __a ( A ):
'''simple docstring'''
lowercase__ = checkpoints.load_tax_checkpoint(A )
lowercase__ = flatten_dict(A )
return flax_params
def __a ( A ):
'''simple docstring'''
lowercase__ = {}
lowercase__ = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
lowercase__ = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowercase__ = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowercase__ = new_key.replace(A , A )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowercase__ = new_key.replace(A , A )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowercase__ = re.sub(r"layers_(\d+)" , r"layer.\1" , A )
lowercase__ = new_key.replace("encoder" , "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowercase__ = re.sub(r"layers_(\d+)" , r"layer.\1" , A )
lowercase__ = flax_dict[key]
lowercase__ = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowercase__ = torch.from_numpy(converted_dict[key].T )
else:
lowercase__ = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __a ( A , A , A=False , A=False ):
'''simple docstring'''
lowercase__ = get_flax_param(A )
if not use_large:
lowercase__ = PixaStructVisionConfig()
lowercase__ = PixaStructTextConfig()
else:
lowercase__ = PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
lowercase__ = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
lowercase__ = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=A )
lowercase__ = PixaStructForConditionalGeneration(A )
lowercase__ = rename_and_convert_flax_params(A )
model.load_state_dict(A )
lowercase__ = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
lowercase__ = PixaStructImageProcessor()
lowercase__ = PixaStructProcessor(image_processor=A , tokenizer=A )
if use_large:
lowercase__ = 40_96
lowercase__ = True
# mkdir if needed
os.makedirs(A , exist_ok=A )
model.save_pretrained(A )
processor.save_pretrained(A )
print("Model saved in {}".format(A ) )
if __name__ == "__main__":
lowerCAmelCase_: List[str] = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
lowerCAmelCase_: int = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
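

# --- Illustrative sketch (not part of the original file): the key renaming above boils
# --- down to (1) dropping the leading "target" prefix, (2) applying plain string
# --- substitutions, and (3) rewriting "layers_<n>" to "layer.<n>" with a regex.
# --- A toy example with a hypothetical flattened key:
import re

mapping_sketch = {"kernel": "weight", "encoder_norm": "layernorm"}
flat_key_sketch = ("target", "encoder", "layers_3", "kernel")

renamed = ".".join(flat_key_sketch[1:])                   # "encoder.layers_3.kernel"
for old, new in mapping_sketch.items():
    renamed = renamed.replace(old, new)                   # "encoder.layers_3.weight"
renamed = re.sub(r"layers_(\d+)", r"layer.\1", renamed)   # "encoder.layer.3.weight"
assert renamed == "encoder.layer.3.weight"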
| 668 | """simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
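

# --- Illustrative usage sketch (not part of the original file): this reader backs the
# --- public `load_dataset("text", ...)` path in the `datasets` library, which yields
# --- one example per line of each input file.
import os
import tempfile

from datasets import load_dataset

with tempfile.TemporaryDirectory() as tmp_dir:
    corpus_path = os.path.join(tmp_dir, "corpus.txt")
    with open(corpus_path, "w") as f:
        f.write("hello\nworld\n")
    ds = load_dataset("text", data_files={"train": corpus_path}, split="train")
    assert ds[0] == {"text": "hello"}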
| 668 | 1 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> int:
if len(_lowercase ) != len(_lowercase ):
        raise ValueError('''The length of profit and weight must be the same.''' )
    if max_weight <= 0:
        raise ValueError('''max_weight must be greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
    # Profit gained per 1 kg of each item: compute and store profit/weight
    # for each element.
_snake_case = [p / w for p, w in zip(_lowercase , _lowercase )]
# Creating a copy of the list and sorting profit/weight in ascending order
_snake_case = sorted(_lowercase )
# declaring useful variables
_snake_case = len(_lowercase )
_snake_case = 0
_snake_case = 0
_snake_case = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio from the sorted list
_snake_case = sorted_profit_by_weight[length - i - 1]
_snake_case = profit_by_weight.index(_lowercase )
_snake_case = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Take the whole item: we gain its full profit, since
            # 1 == weight[index] / weight[index].
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
UpperCAmelCase__ = [int(x) for x in input('Input profits separated by spaces: ').split()]
UpperCAmelCase__ = [int(x) for x in input('Input weights separated by spaces: ').split()]
UpperCAmelCase__ = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
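

# --- Illustrative sketch (not part of the original file): a compact fractional-knapsack
# --- equivalent of the greedy above -- take items by descending profit/weight ratio and
# --- split the last item if it does not fit whole.
def fractional_knapsack_sketch(profit, weight, max_weight):
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    gain, room = 0.0, max_weight
    for i in order:
        take = min(weight[i], room)            # whole item, or whatever room remains
        gain += take / weight[i] * profit[i]
        room -= take
        if room == 0:
            break
    return gain


# Ratios are 5.0, ~1.67 and 3.0 per kg, so the greedy fills 2 kg + 5 kg = 7 kg exactly.
assert fractional_knapsack_sketch([10, 5, 15], [2, 3, 5], 7) == 25.0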
| 224 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase = logging.get_logger(__name__)
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : str = """linear"""
lowerCamelCase_ : int = """cosine"""
lowerCamelCase_ : str = """cosine_with_restarts"""
lowerCamelCase_ : Union[str, Any] = """polynomial"""
lowerCamelCase_ : Tuple = """constant"""
lowerCamelCase_ : List[Any] = """constant_with_warmup"""
lowerCamelCase_ : Optional[int] = """piecewise_constant"""
def lowerCamelCase_ ( _lowercase , _lowercase = -1 ) -> Tuple:
return LambdaLR(_lowercase , lambda _lowercase : 1 , last_epoch=_lowercase )
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase = -1 ) -> Union[str, Any]:
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1.0 , _lowercase ) )
return 1.0
return LambdaLR(_lowercase , _lowercase , last_epoch=_lowercase )
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase = -1 ) -> Any:
__A : str = {}
__A : str = step_rules.split("," )
for rule_str in rule_list[:-1]:
__A , __A : List[Any] = rule_str.split(":" )
__A : Dict = int(_lowercase )
__A : Dict = float(_lowercase )
__A : List[Any] = value
__A : List[Any] = float(rule_list[-1] )
def create_rules_function(_lowercase , _lowercase ):
def rule_func(_lowercase ) -> float:
__A : Optional[int] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowercase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__A : Dict = create_rules_function(_lowercase , _lowercase )
return LambdaLR(_lowercase , _lowercase , last_epoch=_lowercase )
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase , _lowercase=-1 ) -> Dict:
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase , _lowercase = 0.5 , _lowercase = -1 ) -> int:
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
__A : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowercase ) * 2.0 * progress )) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase , _lowercase = 1 , _lowercase = -1 ) -> Tuple:
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
__A : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowercase ) * progress) % 1.0) )) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase , _lowercase=1E-7 , _lowercase=1.0 , _lowercase=-1 ) -> Optional[Any]:
__A : str = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__A : Optional[Any] = lr_init - lr_end
__A : List[Any] = num_training_steps - num_warmup_steps
__A : Tuple = 1 - (current_step - num_warmup_steps) / decay_steps
__A : Optional[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowercase , _lowercase , _lowercase )
UpperCamelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase_ ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = 1 , _lowercase = 1.0 , _lowercase = -1 , ) -> Dict:
__A : List[str] = SchedulerType(_lowercase )
__A : int = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowercase , last_epoch=_lowercase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowercase , step_rules=_lowercase , last_epoch=_lowercase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowercase , num_warmup_steps=_lowercase , last_epoch=_lowercase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , num_cycles=_lowercase , last_epoch=_lowercase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , power=_lowercase , last_epoch=_lowercase , )
return schedule_func(
_lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , last_epoch=_lowercase )
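

# --- Illustrative usage sketch (not part of the original file): stepping a linear
# --- warmup-then-decay schedule by hand. Assumes the factory above is exported as
# --- `get_scheduler`, as in `diffusers.optimization`.
import torch
from diffusers.optimization import get_scheduler

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.AdamW(params, lr=1e-3)
scheduler = get_scheduler("linear", optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    optimizer.step()
    lrs.append(scheduler.get_last_lr()[0])
    scheduler.step()
# The LR ramps from 0 to 1e-3 over the first two steps, then decays linearly toward 0.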
| 520 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_lowerCAmelCase = None
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
_lowerCAmelCase = {
"google/fnet-base": 5_12,
"google/fnet-large": 5_12,
}
_lowerCAmelCase = "▁"
class UpperCamelCase (_UpperCAmelCase ):
_SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Tuple = ['''input_ids''', '''token_type_ids''']
_SCREAMING_SNAKE_CASE : Union[str, Any] = FNetTokenizer
def __init__( self :List[Any] , __magic_name__ :Dict=None , __magic_name__ :List[str]=None , __magic_name__ :str=False , __magic_name__ :Union[str, Any]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :List[str]="<unk>" , __magic_name__ :Optional[int]="[SEP]" , __magic_name__ :Any="<pad>" , __magic_name__ :Dict="[CLS]" , __magic_name__ :Any="[MASK]" , **__magic_name__ :Optional[Any] , ) ->Union[str, Any]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text, so there should be a match in a non-normalized sentence.
lowercase : Tuple = (
AddedToken(A_ , lstrip=A_ , rstrip=A_ , normalized=A_ )
if isinstance(A_ , A_ )
else mask_token
)
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , **A_ , )
lowercase : int = do_lower_case
lowercase : Any = remove_space
lowercase : Any = keep_accents
lowercase : Any = vocab_file
lowercase : Optional[int] = False if not self.vocab_file else True
def __snake_case ( self :List[str] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ) ->List[int]:
lowercase : Optional[Any] = [self.sep_token_id]
lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __snake_case ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ) ->List[int]:
lowercase : Optional[int] = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case ( self :Any , __magic_name__ :str , __magic_name__ :Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Tuple = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
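

# --- Illustrative sketch (not part of the original file): the two methods above build
# --- ALBERT/FNet-style inputs. A single sequence is laid out as [CLS] X [SEP]; a pair
# --- as [CLS] A [SEP] B [SEP], with token type ids 0 over the first segment and 1 over
# --- the second. With hypothetical ids for [CLS] / [SEP]:
cls_id, sep_id = 101, 102
seq_a, seq_b = [7, 8], [9]

pair_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
assert pair_ids == [101, 7, 8, 102, 9, 102]
assert type_ids == [0, 0, 0, 0, 1, 1]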
| 705 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase (metaclass=__snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self :List[str] , *__magic_name__ :int , **__magic_name__ :Tuple ) ->Dict:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __snake_case ( cls :List[Any] , *__magic_name__ :Any , **__magic_name__ :Union[str, Any] ) ->Dict:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __snake_case ( cls :str , *__magic_name__ :Optional[Any] , **__magic_name__ :Union[str, Any] ) ->Tuple:
requires_backends(cls , ["""flax""", """transformers"""] )
class UpperCamelCase (metaclass=__snake_case ):
_SCREAMING_SNAKE_CASE : List[str] = ["""flax""", """transformers"""]
def __init__( self :str , *__magic_name__ :int , **__magic_name__ :List[str] ) ->str:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __snake_case ( cls :Optional[int] , *__magic_name__ :Tuple , **__magic_name__ :Dict ) ->Dict:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __snake_case ( cls :Tuple , *__magic_name__ :Tuple , **__magic_name__ :Optional[int] ) ->Optional[Any]:
requires_backends(cls , ["""flax""", """transformers"""] )
class UpperCamelCase (metaclass=__snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = ["""flax""", """transformers"""]
def __init__( self :Tuple , *__magic_name__ :Dict , **__magic_name__ :Optional[int] ) ->Dict:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __snake_case ( cls :List[str] , *__magic_name__ :Any , **__magic_name__ :Tuple ) ->Dict:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __snake_case ( cls :Any , *__magic_name__ :List[Any] , **__magic_name__ :Optional[Any] ) ->Optional[int]:
requires_backends(cls , ["""flax""", """transformers"""] )
class UpperCamelCase (metaclass=__snake_case ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self :List[str] , *__magic_name__ :int , **__magic_name__ :Dict ) ->Optional[int]:
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def __snake_case ( cls :str , *__magic_name__ :Any , **__magic_name__ :Any ) ->Optional[int]:
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def __snake_case ( cls :int , *__magic_name__ :List[str] , **__magic_name__ :Any ) ->Any:
requires_backends(cls , ["""flax""", """transformers"""] )
| 348 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCamelCase__ = Mapping[str, np.ndarray]
lowerCamelCase__ = Mapping[str, Any] # Is a nested dict.
lowerCamelCase__ = 0.01
@dataclasses.dataclass(frozen=snake_case__ )
class UpperCamelCase :
__UpperCamelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
__UpperCamelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
__UpperCamelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
__UpperCamelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
__UpperCamelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
__UpperCamelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
__UpperCamelCase = None
# Templates used to generate this protein (prediction-only)
__UpperCamelCase = None
# Chain corresponding to each parent
__UpperCamelCase = None
def _lowerCamelCase( __snake_case ) -> Protein:
__snake_case = r"(\[[A-Z]+\]\n)"
__snake_case = [tag.strip() for tag in re.split(__snake_case , __snake_case ) if len(__snake_case ) > 0]
__snake_case = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
__snake_case = ["N", "CA", "C"]
__snake_case = None
__snake_case = None
__snake_case = None
for g in groups:
if "[PRIMARY]" == g[0]:
__snake_case = g[1][0].strip()
for i in range(len(__snake_case ) ):
if seq[i] not in residue_constants.restypes:
__snake_case = "X" # FIXME: strings are immutable
__snake_case = np.array(
[residue_constants.restype_order.get(__snake_case , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__snake_case = []
for axis in range(3 ):
tertiary.append(list(map(__snake_case , g[1][axis].split() ) ) )
__snake_case = np.array(__snake_case )
__snake_case = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__snake_case ):
__snake_case = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__snake_case = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
__snake_case = np.zeros(
(
len(__snake_case ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__snake_case ):
__snake_case = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__snake_case , atom_mask=__snake_case , aatype=__snake_case , residue_index=np.arange(len(__snake_case ) ) , b_factors=__snake_case , )
def _lowerCamelCase( __snake_case , __snake_case = 0 ) -> List[str]:
__snake_case = []
__snake_case = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
__snake_case = prot.parents
__snake_case = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__snake_case = [p for i, p in zip(__snake_case , __snake_case ) if i == chain_id]
if parents is None or len(__snake_case ) == 0:
__snake_case = ["N/A"]
pdb_headers.append(f"""PARENT {" ".join(__snake_case )}""" )
return pdb_headers
def _lowerCamelCase( __snake_case , __snake_case ) -> str:
__snake_case = []
__snake_case = pdb_str.split("\n" )
__snake_case = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
__snake_case = 42
if prot.parents is not None and len(prot.parents ) > 0:
__snake_case = []
if prot.parents_chain_index is not None:
__snake_case = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__snake_case ) , [] )
parent_dict[str(__snake_case )].append(__snake_case )
__snake_case = max([int(__snake_case ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__snake_case = parent_dict.get(str(__snake_case ) , ["N/A"] )
parents_per_chain.append(__snake_case )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__snake_case = [["N/A"]]
def make_parent_line(__snake_case ) -> str:
return f"""PARENT {" ".join(__snake_case )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__snake_case = 0
for i, l in enumerate(__snake_case ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__snake_case )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__snake_case ):
__snake_case = parents_per_chain[chain_counter]
else:
__snake_case = ["N/A"]
out_pdb_lines.append(make_parent_line(__snake_case ) )
return "\n".join(__snake_case )
def _lowerCamelCase( __snake_case ) -> str:
__snake_case = residue_constants.restypes + ["X"]
def res_atoa(__snake_case ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
__snake_case = residue_constants.atom_types
__snake_case = []
__snake_case = prot.atom_mask
__snake_case = prot.aatype
__snake_case = prot.atom_positions
__snake_case = prot.residue_index.astype(np.intaa )
__snake_case = prot.b_factors
__snake_case = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
__snake_case = get_pdb_headers(__snake_case )
if len(__snake_case ) > 0:
pdb_lines.extend(__snake_case )
__snake_case = aatype.shape[0]
__snake_case = 1
__snake_case = 0
__snake_case = string.ascii_uppercase
__snake_case = None
# Add all atom sites.
for i in range(__snake_case ):
__snake_case = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__snake_case , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__snake_case = "ATOM"
__snake_case = atom_name if len(__snake_case ) == 4 else f""" {atom_name}"""
__snake_case = ""
__snake_case = ""
__snake_case = 1.0_0
__snake_case = atom_name[0] # Protein supports only C, N, O, S, this works.
__snake_case = ""
__snake_case = "A"
if chain_index is not None:
__snake_case = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
            __snake_case = (
                f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
                f"""{res_name_a:>3} {chain_tag:>1}"""
                f"""{residue_index[i]:>4}{insertion_code:>1}   """
                f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
                f"""{occupancy:>6.2f}{b_factor:>6.2f}          """
                f"""{element:>2}{charge:>2}"""
            )
)
pdb_lines.append(__snake_case )
atom_index += 1
__snake_case = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__snake_case = True
__snake_case = chain_index[i + 1]
if should_terminate:
# Close the chain.
__snake_case = "TER"
__snake_case = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__snake_case )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__snake_case , __snake_case ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__snake_case )
def _lowerCamelCase( __snake_case ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _lowerCamelCase( __snake_case , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , ) -> Protein:
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__snake_case , remark=__snake_case , parents=__snake_case , parents_chain_index=__snake_case , )
| 524 | import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCamelCase__ = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def _lowerCamelCase( __snake_case , __snake_case , __snake_case=None ) -> str:
if rng is None:
__snake_case = random.Random()
__snake_case = 1
for dim in shape:
total_dims *= dim
__snake_case = []
for _ in range(__snake_case ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__snake_case = np.array(__snake_case , dtype=jnp.intaa ).reshape(__snake_case )
return output
def _lowerCamelCase( __snake_case , __snake_case=None ) -> Optional[int]:
__snake_case = ids_tensor(__snake_case , vocab_size=2 , rng=__snake_case )
# make sure that at least one token is attended to for each batch
__snake_case = 1
return attn_mask
@require_flax
class UpperCamelCase :
__UpperCamelCase = None
__UpperCamelCase = ()
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__snake_case = 2
__snake_case = inputs["input_ids"].shape[-1] // 2
__snake_case = inputs["input_ids"][:max_batch_size, :sequence_length]
__snake_case = jnp.ones_like(_lowerCAmelCase )
__snake_case = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__snake_case = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__snake_case = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase_ ( self : str ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
__snake_case = 0
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model_class.__name__[4:] # Skip the "Flax" at the beginning
__snake_case = getattr(_lowerCAmelCase ,_lowerCAmelCase )
__snake_case = pt_model_class(_lowerCAmelCase ).eval()
__snake_case = load_flax_weights_in_pytorch_model(_lowerCAmelCase ,flax_model.params )
__snake_case = flax_model.generate(_lowerCAmelCase ).sequences
__snake_case = pt_model.generate(torch.tensor(_lowerCAmelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__snake_case = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = True
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
__snake_case = 2
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
__snake_case = 2
__snake_case = 2
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = True
__snake_case = max_length
__snake_case = 0.8
__snake_case = 10
__snake_case = 0.3
__snake_case = 1
__snake_case = 8
__snake_case = 9
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = max_length
__snake_case = 1
__snake_case = 8
__snake_case = 9
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = max_length
__snake_case = 2
__snake_case = 1
__snake_case = 8
__snake_case = 9
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case = attention_mask.at[(0, 0)].set(0 )
__snake_case = False
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case = attention_mask.at[(0, 0)].set(0 )
__snake_case = True
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Any ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
# pad attention mask on the left
__snake_case = attention_mask.at[(0, 0)].set(0 )
__snake_case = 2
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
__snake_case = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
__snake_case = "Hello world"
__snake_case = tokenizer(_lowerCAmelCase ,return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_lowerCAmelCase ,"do_samples" ):
model.generate(_lowerCAmelCase ,do_samples=_lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_lowerCAmelCase ,"foo" ):
__snake_case = {"foo": "bar"}
model.generate(_lowerCAmelCase ,**_lowerCAmelCase )
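

# --- Illustrative sketch (not part of the original file): the recurring pattern in
# --- these tests -- run a function eagerly and under `jax.jit`, then compare the
# --- results -- shown on a toy function instead of `model.generate`.
import jax
import jax.numpy as jnp


def toy_decode_step(logits):
    return jnp.argmax(logits, axis=-1)


logits = jnp.array([[0.1, 0.9], [0.7, 0.3]])
eager_out = toy_decode_step(logits)
jitted_out = jax.jit(toy_decode_step)(logits)
assert eager_out.tolist() == jitted_out.tolist()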
| 524 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=0.9 , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , ):
__A : str = size if size is not None else {'shortest_edge': 30}
__A : Union[str, Any] = crop_size if crop_size is not None else {'height': 30, 'width': 30}
__A : List[Any] = parent
__A : List[str] = batch_size
__A : Dict = num_channels
__A : str = min_resolution
__A : Any = max_resolution
__A : Union[str, Any] = do_resize_and_center_crop
__A : Dict = size
__A : Any = crop_pct
__A : str = crop_size
__A : List[Any] = do_normalize
__A : Union[str, Any] = image_mean
__A : Tuple = image_std
def UpperCAmelCase_ ( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : int = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : str = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_A , 'size' ) )
self.assertTrue(hasattr(_A , 'crop_pct' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
def UpperCAmelCase_ ( self ):
__A : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
__A : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__A : List[Any] = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
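

# --- Illustrative usage sketch (not part of the original file): outside the test
# --- harness the processor is driven like this; assumes the constructor accepts the
# --- same `size` / `crop_size` dicts the tests above exercise.
from PIL import Image
from transformers import PoolFormerImageProcessor

proc = PoolFormerImageProcessor(size={"shortest_edge": 32}, crop_size={"height": 32, "width": 32})
out = proc(images=Image.new("RGB", (64, 64)), return_tensors="pt")
assert out.pixel_values.shape == (1, 3, 32, 32)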
| 708 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _SCREAMING_SNAKE_CASE ( ) -> None:
print('Making key files...' )
make_key_files('rsa' , 10_24 )
print('Key files generation successful.' )
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[tuple[int, int], tuple[int, int]]:
print('Generating prime p...' )
__A : Optional[Any] = rabinMiller.generate_large_prime(a )
print('Generating prime q...' )
__A : Union[str, Any] = rabinMiller.generate_large_prime(a )
__A : Tuple = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
__A : Dict = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(a , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
__A : Any = cryptoMath.find_mod_inverse(a , (p - 1) * (q - 1) )
__A : Dict = (n, e)
__A : Dict = (n, d)
return (public_key, private_key)
def _SCREAMING_SNAKE_CASE ( a , a ) -> None:
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__A , __A : Optional[int] = generate_key(a )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
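

# --- Worked example (not part of the original file): the key math above on textbook
# --- toy primes. With p=61, q=53: n=3233 and phi=3120; e=17 is coprime to phi and
# --- d=2753 is its modular inverse, since 17 * 2753 = 46801 = 15 * 3120 + 1.
p_toy, q_toy = 61, 53
n_toy, phi_toy = p_toy * q_toy, (p_toy - 1) * (q_toy - 1)
e_toy, d_toy = 17, 2753
assert (e_toy * d_toy) % phi_toy == 1

message = 65
ciphertext = pow(message, e_toy, n_toy)          # encrypt with the public key (n, e)
assert pow(ciphertext, d_toy, n_toy) == message  # decrypt with the private key (n, d)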
| 77 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowercase ( _a ):
if not nums:
return 0
snake_case_ : List[str] = nums[0]
snake_case_ : Tuple = 0
for num in nums[1:]:
snake_case_, snake_case_ : int = (
max_excluding + num,
max(__A , __A ),
)
return max(__A , __A )
if __name__ == "__main__":
import doctest
doctest.testmod()
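

# --- Worked example (not part of the original file), restated with readable names.
# --- On [3, 2, 7, 10] the (including, excluding) pair evolves
# --- (3, 0) -> (2, 3) -> (10, 3) -> (13, 10), so the answer is 13 = 3 + 10.
def max_non_adjacent_sum_sketch(nums):
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)


assert max_non_adjacent_sum_sketch([3, 2, 7, 10]) == 13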
| 123 |
from collections import deque
def A__ ( __A : Optional[Any] ) ->Tuple:
__A =len(__A )
__A =deque()
__A =[False for _ in range(__A )]
__A =[-1 for _ in range(__A )]
__A =index_of[:]
def strong_connect(__A : Union[str, Any] , __A : int , __A : Optional[int] ):
__A =index # the number when this node is seen
__A =index # lowest rank node reachable from here
index += 1
stack.append(__A )
__A =True
for w in g[v]:
if index_of[w] == -1:
__A =strong_connect(__A , __A , __A )
__A =(
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__A =(
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__A =[]
__A =stack.pop()
__A =False
component.append(__A )
while w != v:
__A =stack.pop()
__A =False
component.append(__A )
components.append(__A )
return index
__A =[]
for v in range(__A ):
if index_of[v] == -1:
strong_connect(__A , 0 , __A )
return components
def A__ ( __A : List[Any] , __A : Optional[int] ) ->Tuple:
__A =[[] for _ in range(__A )]
for u, v in edges:
g[u].append(__A )
return g
if __name__ == "__main__":
# Test
_lowerCamelCase : int = 7
_lowerCamelCase : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_lowerCamelCase : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_lowerCamelCase : Optional[Any] = [(u, v) for u, v in zip(source, target)]
_lowerCamelCase : Dict = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
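
# --- Illustrative check (not part of the original file), assuming -- as the test
# --- above already does -- that the functions are exported as `create_graph` and
# --- `tarjan`. Vertices 0 and 1 form a cycle and vertex 2 is a sink reached from 1,
# --- so the components come out sinks-first:
#
#     g2 = create_graph(3, [(0, 1), (1, 0), (1, 2)])
#     assert tarjan(g2) == [[2], [1, 0]]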
| 184 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__lowercase : Optional[int] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: Tuple , *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: Tuple ) -> None:
'''simple docstring'''
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 550 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowercase : Optional[int] =TypeVar("""T""")
def a__ ( lowercase__ ):
'''simple docstring'''
return (position - 1) // 2
def a__ ( lowercase__ ):
'''simple docstring'''
return (2 * position) + 1
def a__ ( lowercase__ ):
'''simple docstring'''
return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap, with O(log n) update_key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as nested adjacency dictionaries."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(graph):
    """Run Prim's algorithm; return (dist, parent) describing an MST of ``graph``."""
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}

    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
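
# Illustrative usage sketch (added, not part of the original module): build a
# small undirected weighted graph and compute its minimum spanning tree with
# the structures above. Names follow the restored class/function names.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("c", "d", 5)
    graph.add_edge("a", "c", 15)
    dist, parent = prims_algo(graph)
    # `parent` maps each node to its predecessor in the spanning tree; the
    # tree edges here are a-b (3), c-d (5) and b-c (10), regardless of root.
    print(dist, parent)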
| 550 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[str, List[str], None] = None,
        ignore_files: Union[List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run through the given directory, doctesting every matching file."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_no_fail_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_documentation_examples(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 485 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 485 | 1 |
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Encode ``message`` into Morse code, one space between symbols."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode a Morse-code string produced by ``encrypt``."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
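
# Round-trip sanity check (added example, not in the original script): letters
# are separated by single spaces, and word gaps map to "/" via the table above.
assert encrypt("SOS") == "... --- ..."
assert decrypt("... --- ...") == "SOS"
assert decrypt(encrypt("HELLO WORLD")) == "HELLO WORLD"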
| 108 |
def solution(limit: int = 50000000) -> int:
    """
    Count how many numbers below ``limit`` can be written as the sum of a prime
    square, a prime cube and a prime fourth power (Project Euler problem 87).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'''{solution() = }''')
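
# Worked example (added for illustration): the smallest expressible number is
# 28 = 2**2 + 2**3 + 2**4, so counting strictly below 29 finds exactly one hit.
assert solution(29) == 1
assert solution(28) == 0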
| 108 | 1 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n'

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n'

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n'
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n'
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
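
# Standalone sketch (added, not part of the test suite) of the monkey-patching
# trick used throughout this class: replacing socket.socket makes any attempt
# to open a connection raise, which emulates a machine with no network.
if __name__ == "__main__":
    import socket

    def offline_socket(*args, **kwargs):
        raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

    socket.socket = offline_socket  # from here on, all connections fail fast
    try:
        socket.create_connection(("huggingface.co", 443), timeout=1)
    except RuntimeError as err:
        print(f"blocked as expected: {err}")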
| 539 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 colour clusters plus one start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
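
# Quick sketch (added example): instantiate the config above with defaults and
# exercise the attribute_map aliases, assuming the alias resolution that
# transformers' PretrainedConfig provides.
if __name__ == "__main__":
    config = ImageGPTConfig()
    assert config.vocab_size == 513  # 512 colour clusters + 1 SOS token
    assert config.hidden_size == config.n_embd == 512
    assert config.num_hidden_layers == config.n_layer == 24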
| 539 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the Swin Transformer model."""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
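
# Quick sketch (added example) of the derived channel dimension noted above:
# with the default embed_dim=96 and four stages, hidden_size = 96 * 2**3 = 768.
if __name__ == "__main__":
    config = SwinConfig()
    assert config.hidden_size == config.embed_dim * 2 ** (len(config.depths) - 1) == 768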
| 702 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Extra arguments on top of `TrainingArguments` used by the seq2seq trainer."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 356 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
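
# Minimal sketch (added for illustration, and much simpler than transformers'
# real _LazyModule) of the lazy-import pattern used above: the heavy submodule
# is only imported when one of its public names is first accessed.
if __name__ == "__main__":
    import importlib
    import types

    class TinyLazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # reverse map: public attribute -> submodule that defines it
            self._attr_to_module = {
                attr: mod for mod, attrs in import_structure.items() for attr in attrs
            }

        def __getattr__(self, attr):
            module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache so the import only happens once
            return value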
| 451 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 451 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UpperCamelCase__ ( __lowercase ,__lowercase ):
_SCREAMING_SNAKE_CASE : List[str] = [e.name for e in KarrasDiffusionSchedulers]
_SCREAMING_SNAKE_CASE : Optional[int] = 2
@register_to_config
def __init__(self : Any , snake_case_ : int = 1_0_0_0 , snake_case_ : float = 0.0_0085 , snake_case_ : float = 0.012 , snake_case_ : str = "linear" , snake_case_ : Optional[Union[np.ndarray, List[float]]] = None , snake_case_ : str = "epsilon" , snake_case_ : Optional[bool] = False , snake_case_ : Optional[bool] = False , snake_case_ : float = 1.0 , snake_case_ : str = "linspace" , snake_case_ : int = 0 , ):
if trained_betas is not None:
__a : Tuple = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__a : List[Any] = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a : Optional[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a : Optional[Any] = betas_for_alpha_bar(snake_case_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
__a : str = betas_for_alpha_bar(snake_case_ , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
__a : Optional[int] = 1.0 - self.betas
__a : str = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
__a : Optional[int] = use_karras_sigmas
def lowerCAmelCase (self : Dict , snake_case_ : Dict , snake_case_ : Union[str, Any]=None ):
if schedule_timesteps is None:
__a : Optional[int] = self.timesteps
__a : List[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a : Tuple = 1 if len(snake_case_ ) > 1 else 0
else:
__a : Tuple = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
__a : Dict = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase (self : List[str] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase (self : Dict , snake_case_ : torch.FloatTensor , snake_case_ : Union[float, torch.FloatTensor] , ):
__a : List[str] = self.index_for_timestep(snake_case_ )
__a : Tuple = self.sigmas[step_index]
__a : Tuple = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase (self : int , snake_case_ : int , snake_case_ : Union[str, torch.device] = None , snake_case_ : Optional[int] = None , ):
__a : int = num_inference_steps
__a : Any = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a : str = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a : List[Any] = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a : Tuple = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
__a : List[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a : Optional[Any] = np.log(snake_case_ )
__a : List[Any] = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
if self.config.use_karras_sigmas:
__a : Union[str, Any] = self._convert_to_karras(in_sigmas=snake_case_ , num_inference_steps=self.num_inference_steps )
__a : List[str] = np.array([self._sigma_to_t(snake_case_ , snake_case_ ) for sigma in sigmas] )
__a : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a : int = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
__a : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__a : Tuple = torch.from_numpy(snake_case_ )
__a : List[Any] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(snake_case_ ).startswith('''mps''' ):
# mps does not support float64
__a : List[Any] = timesteps.to(snake_case_ , dtype=torch.floataa )
else:
__a : Any = timesteps.to(device=snake_case_ )
# empty dt and derivative
__a : List[str] = None
__a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a : List[str] = defaultdict(snake_case_ )
def lowerCAmelCase (self : List[Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
# get log sigma
__a : int = np.log(snake_case_ )
# get distribution
__a : str = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__a : List[str] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__a : List[Any] = low_idx + 1
__a : int = log_sigmas[low_idx]
__a : List[Any] = log_sigmas[high_idx]
# interpolate sigmas
__a : Union[str, Any] = (low - log_sigma) / (low - high)
__a : List[Any] = np.clip(snake_case_ , 0 , 1 )
# transform interpolation to time range
__a : Any = (1 - w) * low_idx + w * high_idx
__a : int = t.reshape(sigma.shape )
return t
def lowerCAmelCase (self : int , snake_case_ : torch.FloatTensor , snake_case_ : Dict ):
__a : float = in_sigmas[-1].item()
__a : float = in_sigmas[0].item()
__a : str = 7.0 # 7.0 is the value used in the paper
__a : Union[str, Any] = np.linspace(0 , 1 , snake_case_ )
__a : List[Any] = sigma_min ** (1 / rho)
__a : Optional[int] = sigma_max ** (1 / rho)
__a : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowerCAmelCase (self : int ):
return self.dt is None
def lowerCAmelCase (self : Dict , snake_case_ : Union[torch.FloatTensor, np.ndarray] , snake_case_ : Union[float, torch.FloatTensor] , snake_case_ : Union[torch.FloatTensor, np.ndarray] , snake_case_ : bool = True , ):
__a : Optional[Any] = self.index_for_timestep(snake_case_ )
# advance index counter by 1
__a : Dict = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a : Tuple = self.sigmas[step_index]
__a : List[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__a : Any = self.sigmas[step_index - 1]
__a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a : Union[str, Any] = 0
__a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_next
__a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a : Dict = sigma_hat if self.state_in_first_order else sigma_next
__a : str = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__a : List[str] = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
__a : List[Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a : List[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a : Optional[Any] = sigma_next - sigma_hat
# store for 2nd order step
__a : Dict = derivative
__a : Union[str, Any] = dt
__a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
__a : List[Any] = (sample - pred_original_sample) / sigma_next
__a : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__a : int = self.dt
__a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__a : Union[str, Any] = None
__a : Any = None
__a : Dict = None
__a : List[str] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def lowerCAmelCase (self : Optional[Any] , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a : List[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
__a : Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a : Union[str, Any] = self.timesteps.to(original_samples.device )
__a : Union[str, Any] = timesteps.to(original_samples.device )
__a : Optional[int] = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
__a : Any = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a : Dict = sigma.unsqueeze(-1 )
__a : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__(self : Optional[int] ):
return self.config.num_train_timesteps
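
# Standalone numpy sketch (added example) of the Karras et al. (2022) sigma
# schedule implemented in `_convert_to_karras` above: interpolate linearly in
# sigma**(1/rho) space with rho = 7 and map back.
if __name__ == "__main__":
    import numpy as np

    def karras_sigmas(sigma_min, sigma_max, num_steps, rho=7.0):
        ramp = np.linspace(0, 1, num_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

    print(karras_sigmas(0.03, 14.6, 5))  # decreasing sigmas, dense near sigma_min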
| 326 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]=1_0 ):
__a : Tuple = []
for _ in range(lowerCAmelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str=1_0 ):
__a : int = []
for step in range(lowerCAmelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
__a : Any = os.path.join(lowerCAmelCase__ , '''schedule.bin''' )
torch.save(scheduler.state_dict() , lowerCAmelCase__ )
__a : str = torch.load(lowerCAmelCase__ )
scheduler.load_state_dict(lowerCAmelCase__ )
return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
def lowerCAmelCase (self : Tuple , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : int ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ )
def lowerCAmelCase (self : Dict ):
__a : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case_ )
__a : Optional[int] = torch.tensor([0.4, 0.2, -0.5] )
__a : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__a : Tuple = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
__a : Optional[int] = criterion(snake_case_ , snake_case_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCAmelCase (self : Any ):
__a : int = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case_ )
__a : Optional[Any] = torch.tensor([0.4, 0.2, -0.5] )
__a : List[str] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__a : Tuple = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case_ , weight_decay=0.0 , relative_step=snake_case_ , scale_parameter=snake_case_ , warmup_init=snake_case_ , )
for _ in range(1_0_0_0 ):
__a : str = criterion(snake_case_ , snake_case_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = nn.Linear(50 ,50 ) if is_torch_available() else None
_SCREAMING_SNAKE_CASE : Any = AdamW(m.parameters() ,lr=10.0 ) if is_torch_available() else None
_SCREAMING_SNAKE_CASE : Tuple = 10
def lowerCAmelCase (self : int , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : str=None ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ , msg=snake_case_ )
def lowerCAmelCase (self : int ):
__a : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
__a : Tuple = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
__a , __a : Union[str, Any] = data
__a : int = scheduler_func(self.optimizer , **snake_case_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__a : Tuple = unwrap_schedule(snake_case_ , self.num_steps )
self.assertListAlmostEqual(
snake_case_ , snake_case_ , tol=1E-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
__a : Any = scheduler_func(self.optimizer , **snake_case_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case_ ) # wrap to test picklability of the schedule
__a : Tuple = unwrap_and_save_reload_schedule(snake_case_ , self.num_steps )
self.assertListEqual(snake_case_ , snake_case_ , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
    """Pickable wrapper around a schedule lambda, used to test save/reload."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
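
# Sketch (added example) of the linear warmup + linear decay rule that the
# `get_linear_schedule_with_warmup` expectations above encode: the multiplier
# climbs from 0 to 1 over the warmup steps, then decays linearly back to 0.
if __name__ == "__main__":
    def linear_lambda(step, num_warmup_steps=2, num_training_steps=10):
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

    # with a base lr of 10.0 this reproduces [0.0, 5.0, 10.0, 8.75, ..., 1.25]
    print([round(10.0 * linear_lambda(s), 2) for s in range(10)])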
| 326 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
__UpperCAmelCase = False
__UpperCAmelCase = False
def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate training command from provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
@staticmethod
def _lowerCAmelCase ( _lowercase : List[Any] ):
lowerCAmelCase__ : Tuple = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=SCREAMING_SNAKE_CASE__ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=SCREAMING_SNAKE_CASE__ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=SCREAMING_SNAKE_CASE__ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=SCREAMING_SNAKE_CASE__ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=SCREAMING_SNAKE_CASE__ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=SCREAMING_SNAKE_CASE__ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE__ , default=3_2 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=SCREAMING_SNAKE_CASE__ , default=6_4 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=SCREAMING_SNAKE_CASE__ , default=1e-0_8 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , _lowercase : int ):
lowerCAmelCase__ : List[Any] = logging.get_logger("transformers-cli/training" )
lowerCAmelCase__ : List[Any] = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Union[str, Any] = args.output
lowerCAmelCase__ : Tuple = args.column_label
lowerCAmelCase__ : int = args.column_text
lowerCAmelCase__ : List[Any] = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
lowerCAmelCase__ : List[str] = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}" )
lowerCAmelCase__ : Tuple = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCAmelCase__ : int = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}" )
lowerCAmelCase__ : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCAmelCase__ : Any = args.validation_split
lowerCAmelCase__ : List[str] = args.train_batch_size
lowerCAmelCase__ : Optional[Any] = args.valid_batch_size
lowerCAmelCase__ : Tuple = args.learning_rate
lowerCAmelCase__ : List[str] = args.adam_epsilon
def _lowerCAmelCase ( self : Optional[int] ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _lowerCAmelCase ( self : Union[str, Any] ):
raise NotImplementedError
def _lowerCAmelCase ( self : Dict ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 308 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_A = "Usage of script: script_name <size_of_canvas:int>"
_A = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """Create an empty size x size canvas of dead cells."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly seed the canvas with live and dead cells."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the canvas by one generation and return the new state."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
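
# Quick sketch (added example, defined but not called by the script): a
# three-cell "blinker" oscillates between a horizontal and a vertical bar
# under the rules implemented in __judge_point above.
def _blinker_demo() -> None:
    blinker = create_canvas(5)
    for col in (1, 2, 3):
        blinker[2][col] = True
    next_gen = run(blinker)
    assert [row[2] for row in next_gen[1:4]] == [True, True, True]  # now vertical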
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 279 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ['linear', 'squaredcos_cap_v2']:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ['fixed_small', 'fixed_large', 'other']:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ['epsilon', 'sample', 'v_prediction']:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'sample', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
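
    @staticmethod
    def _reference_fixed_small_variance(t, num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02):
        # Hedged sketch, not part of the original suite: the 'fixed_small'
        # variance checked above is the DDPM posterior variance
        #     beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t),
        # recomputed here from scratch for the linear beta schedule in the
        # config. It reproduces the asserted magnitudes (~0.0 at t=0,
        # ~0.00979 at t=487, ~0.02 at t=999).
        betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
        alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
        alpha_prod_t = alphas_cumprod[t]
        alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
        return (betas[t] * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)).item()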
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 526 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 526 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
    'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
    'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
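# Hedged usage sketch (not part of this module): instantiate the config and
# inspect the ONNX input axes. Only standard `transformers` APIs are assumed;
# the kwargs below are illustrative.
if __name__ == "__main__":
    config = BigBirdConfig(attention_type="original_full", num_hidden_layers=2)
    onnx_config = BigBirdOnnxConfig(config)
    print(config.model_type)         # -> big_bird
    print(dict(onnx_config.inputs))  # -> dynamic axes for input_ids / attention_mask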
| 721 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate schedule type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )
@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )
@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    vocab_size: Optional[int] = field(default=200_000, metadata={"help": "Vocabulary size of the new tokenizer."})
    n_examples: Optional[int] = field(
        default=32_768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})
@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
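# Hedged usage sketch (not part of this module): these dataclasses are meant to
# be filled from the command line via transformers' HfArgumentParser.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    training_args = parser.parse_args_into_dataclasses()[0]
    print(training_args.model_ckpt, training_args.train_batch_size)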
| 326 | 0 |